{
  "updated": "2026-04-28T19:09:03.342Z",
  "source": "https://api.cloudflare.com/client/v4/accounts/0460574641fdbb98159c98ebf593e2bd/ai/models/search",
  "total": 91,
  "total_upstream": 91,
  "deprecated_filtered": 0,
  "models": [
    {
      "id": "@cf/ai4bharat/indictrans2-en-indic-1B",
      "short_name": "Indictrans2 En Indic 1B",
      "provider": "ai4bharat",
      "task": "translation",
      "params": "1B",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "fast",
      "description": "IndicTrans2 is the first open-source transformer-based multilingual NMT model that supports high-quality translations across all the 22 scheduled Indic languages",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2025-09-23 18:19:17.382"
    },
    {
      "id": "@cf/aisingapore/gemma-sea-lion-v4-27b-it",
      "short_name": "Gemma Sea Lion V4 27B IT",
      "provider": "aisingapore",
      "task": "text-generation",
      "params": "27B",
      "context_length": 128000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "SEA-LION stands for Southeast Asian Languages In One Network, which is a collection of Large Language Models (LLMs) which have been pretrained and instruct-tuned for the Southeast Asia (SEA) region.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-09-23 19:27:30.468"
    },
    {
      "id": "@cf/baai/bge-reranker-base",
      "short_name": "BGE Reranker Base",
      "provider": "baai",
      "task": "text-classification",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "Different from embedding model, reranker uses question and document as input and directly output similarity instead of embedding. You can get a relevance score by inputting query and passage to the reranker. And the score can be mapped to a float value in [0,1] by sigmoid function.\n\n",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-02-14 12:28:19.009"
    },
    {
      "id": "@cf/baai/bge-large-en-v1.5",
      "short_name": "BGE Large En V1.5",
      "provider": "baai",
      "task": "text-embeddings",
      "params": null,
      "context_length": null,
      "max_input": 512,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "BAAI general embedding (Large) model that transforms any given text into a 1024-dimensional vector",
      "docs_url": "https://huggingface.co/BAAI/bge-large-en-v1.5",
      "licence_url": null,
      "flagship": true,
      "created_at": "2023-11-07 15:43:58.042"
    },
    {
      "id": "@cf/baai/bge-m3",
      "short_name": "BGE M3",
      "provider": "baai",
      "task": "text-embeddings",
      "params": null,
      "context_length": 60000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "Multi-Functionality, Multi-Linguality, and Multi-Granularity embeddings model.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2024-05-22 19:27:09.781"
    },
    {
      "id": "@cf/baai/bge-base-en-v1.5",
      "short_name": "BGE Base En V1.5",
      "provider": "baai",
      "task": "text-embeddings",
      "params": null,
      "context_length": 153600,
      "max_input": 512,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "BAAI general embedding (Base) model that transforms any given text into a 768-dimensional vector",
      "docs_url": "https://huggingface.co/BAAI/bge-base-en-v1.5",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-09-25 19:21:11.898"
    },
    {
      "id": "@cf/baai/bge-small-en-v1.5",
      "short_name": "BGE Small En V1.5",
      "provider": "baai",
      "task": "text-embeddings",
      "params": null,
      "context_length": null,
      "max_input": 512,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "fast",
      "description": "BAAI general embedding (Small) model that transforms any given text into a 384-dimensional vector",
      "docs_url": "https://huggingface.co/BAAI/bge-small-en-v1.5",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-11-07 15:43:58.042"
    },
    {
      "id": "@cf/black-forest-labs/flux-1-schnell",
      "short_name": "Flux 1 Schnell",
      "provider": "black-forest-labs",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "FLUX.1 [schnell] is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions. ",
      "docs_url": null,
      "licence_url": "https://bfl.ai/legal/terms-of-service",
      "flagship": true,
      "created_at": "2024-08-29 16:37:39.541"
    },
    {
      "id": "@cf/black-forest-labs/flux-2-dev",
      "short_name": "Flux 2 Dev",
      "provider": "black-forest-labs",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "FLUX.2 [dev] is an image model from Black Forest Labs where you can generate highly realistic and detailed images, with multi-reference support.",
      "docs_url": null,
      "licence_url": "https://bfl.ai/legal/terms-of-service",
      "flagship": true,
      "created_at": "2025-11-24 15:44:06.050"
    },
    {
      "id": "@cf/black-forest-labs/flux-2-klein-9b",
      "short_name": "Flux 2 Klein 9B",
      "provider": "black-forest-labs",
      "task": "text-to-image",
      "params": "9B",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "FLUX.2 [klein] 9B is a 9 billion parameter model that can generate images from text descriptions and supports multi-reference editing capabilities.",
      "docs_url": null,
      "licence_url": "https://bfl.ai/legal/terms-of-service",
      "flagship": false,
      "created_at": "2026-01-14 12:55:54.294"
    },
    {
      "id": "@cf/black-forest-labs/flux-2-klein-4b",
      "short_name": "Flux 2 Klein 4B",
      "provider": "black-forest-labs",
      "task": "text-to-image",
      "params": "4B",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "FLUX.2 [klein] is an ultra-fast, distilled image model. It unifies image generation and editing in a single model, delivering state-of-the-art quality enabling interactive workflows, real-time previews, and latency-critical applications.",
      "docs_url": null,
      "licence_url": "https://bfl.ai/legal/terms-of-service",
      "flagship": false,
      "created_at": "2026-01-14 12:54:55.024"
    },
    {
      "id": "@cf/bytedance/stable-diffusion-xl-lightning",
      "short_name": "Stable Diffusion Xl Lightning",
      "provider": "bytedance",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "SDXL-Lightning is a lightning-fast text-to-image generation model. It can generate high-quality 1024px images in a few steps.",
      "docs_url": "https://huggingface.co/ByteDance/SDXL-Lightning",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 17:41:29.578"
    },
    {
      "id": "@cf/deepgram/flux",
      "short_name": "Flux",
      "provider": "deepgram",
      "task": "automatic-speech-recognition",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "Flux is the first conversational speech recognition model built specifically for voice agents.",
      "docs_url": null,
      "licence_url": "https://deepgram.com/terms",
      "flagship": true,
      "created_at": "2025-09-29 21:07:55.114"
    },
    {
      "id": "@cf/deepgram/nova-3",
      "short_name": "Nova 3",
      "provider": "deepgram",
      "task": "automatic-speech-recognition",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "Transcribe audio using Deepgram’s speech-to-text model",
      "docs_url": null,
      "licence_url": "https://deepgram.com/terms",
      "flagship": true,
      "created_at": "2025-06-05 16:05:15.199"
    },
    {
      "id": "@cf/deepgram/aura-2-es",
      "short_name": "Aura 2 Es",
      "provider": "deepgram",
      "task": "text-to-speech",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "Aura-2 is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.",
      "docs_url": null,
      "licence_url": "https://deepgram.com/terms",
      "flagship": true,
      "created_at": "2025-10-09 22:42:37.002"
    },
    {
      "id": "@cf/deepgram/aura-1",
      "short_name": "Aura 1",
      "provider": "deepgram",
      "task": "text-to-speech",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Aura is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.",
      "docs_url": null,
      "licence_url": "https://deepgram.com/terms",
      "flagship": false,
      "created_at": "2025-08-27 01:18:18.880"
    },
    {
      "id": "@cf/deepgram/aura-2-en",
      "short_name": "Aura 2 En",
      "provider": "deepgram",
      "task": "text-to-speech",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Aura-2 is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.",
      "docs_url": null,
      "licence_url": "https://deepgram.com/terms",
      "flagship": false,
      "created_at": "2025-10-09 22:19:34.483"
    },
    {
      "id": "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
      "short_name": "DeepSeek R1 Distill Qwen 32B",
      "provider": "deepseek-ai",
      "task": "text-generation",
      "params": "32B",
      "context_length": 80000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": true,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "DeepSeek-R1-Distill-Qwen-32B is a model distilled from DeepSeek-R1 based on Qwen2.5. It outperforms OpenAI-o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.",
      "docs_url": null,
      "licence_url": "https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE",
      "flagship": true,
      "created_at": "2025-01-22 19:48:55.776"
    },
    {
      "id": "@cf/deepseek-ai/deepseek-math-7b-instruct",
      "short_name": "DeepSeek Math 7B Instruct",
      "provider": "deepseek-ai",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "DeepSeekMath-Instruct 7B is a mathematically instructed tuning model derived from DeepSeekMath-Base 7B. DeepSeekMath is initialized with DeepSeek-Coder-v1.5 7B and continues pre-training on math-related tokens sourced from Common Crawl, together with natural language and code data for 500B tokens.",
      "docs_url": "https://huggingface.co/deepseek-ai/deepseek-math-7b-instruct",
      "licence_url": "https://github.com/deepseek-ai/DeepSeek-Math/blob/main/LICENSE-MODEL",
      "flagship": true,
      "created_at": "2024-02-27 17:54:17.459"
    },
    {
      "id": "@cf/defog/sqlcoder-7b-2",
      "short_name": "Sqlcoder 7B 2",
      "provider": "defog",
      "task": "text-generation",
      "params": "7B",
      "context_length": 10000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "This model is intended to be used by non-technical users to understand data inside their SQL databases. ",
      "docs_url": "https://huggingface.co/defog/sqlcoder-7b-2",
      "licence_url": "https://creativecommons.org/licenses/by-sa/4.0/deed.en",
      "flagship": true,
      "created_at": "2024-02-27 18:18:46.095"
    },
    {
      "id": "@cf/facebook/bart-large-cnn",
      "short_name": "Bart Large Cnn",
      "provider": "facebook",
      "task": "summarization",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "BART is a transformer encoder-decoder (seq2seq) model with a bidirectional (BERT-like) encoder and an autoregressive (GPT-like) decoder. You can use this model for text summarization.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2024-02-27 18:28:11.833"
    },
    {
      "id": "@cf/fblgit/una-cybertron-7b-v2-bf16",
      "short_name": "Una Cybertron 7B V2 BF16",
      "provider": "fblgit",
      "task": "text-generation",
      "params": "7B",
      "context_length": 15000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Cybertron 7B v2 is a 7B MistralAI based model, the best of its series. It was trained with SFT, DPO and UNA (Unified Neural Alignment) on multiple datasets.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-24 14:37:19.494"
    },
    {
      "id": "@cf/google/embeddinggemma-300m",
      "short_name": "Embeddinggemma 300M",
      "provider": "google",
      "task": "text-embeddings",
      "params": "300M",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "EmbeddingGemma is a 300M parameter, state-of-the-art for its size, open embedding model from Google, built from Gemma 3 (with T5Gemma initialization) and the same research and technology used to create Gemini models. EmbeddingGemma produces vector representations of text, making it well-suited for search and retrieval tasks, including classification, clustering, and semantic similarity search. This model was trained with data in 100+ spoken languages.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-09-04 16:38:44.980"
    },
    {
      "id": "@cf/google/gemma-4-26b-a4b-it",
      "short_name": "Gemma 4 26B IT",
      "provider": "google",
      "task": "text-generation",
      "params": "26B",
      "context_length": 256000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Gemma 4 is Google's most intelligent family of open models, built from Gemini 3 research to maximize intelligence-per-parameter.",
      "docs_url": null,
      "licence_url": "https://ai.google.dev/gemma/docs/gemma_4_license",
      "flagship": true,
      "created_at": "2026-04-02 15:05:22.642"
    },
    {
      "id": "@cf/google/gemma-3-12b-it",
      "short_name": "Gemma 3 12B IT",
      "provider": "google",
      "task": "text-generation",
      "params": "12B",
      "context_length": 80000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "flagship",
      "description": "Gemma 3 models are well-suited for a variety of text generation and image understanding tasks, including question answering, summarization, and reasoning. Gemma 3 models are multimodal, handling text and image input and generating text output, with a large, 128K context window, multilingual support in over 140 languages, and is available in more sizes than previous versions.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-03-18 03:58:02.423"
    },
    {
      "id": "@cf/google/gemma-7b-it-lora",
      "short_name": "Gemma 7B IT LORA",
      "provider": "google",
      "task": "text-generation",
      "params": "7B",
      "context_length": 3500,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "fast",
      "description": "  This is a Gemma-7B base model that Cloudflare dedicates for inference with LoRA adapters. Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-02 00:20:19.633"
    },
    {
      "id": "@hf/google/gemma-7b-it",
      "short_name": "Gemma 7B IT",
      "provider": "google",
      "task": "text-generation",
      "params": "7B",
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "fast",
      "description": "Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants.",
      "docs_url": "https://ai.google.dev/gemma/docs",
      "licence_url": "https://ai.google.dev/gemma/terms",
      "flagship": false,
      "created_at": "2024-04-01 23:51:35.866"
    },
    {
      "id": "@cf/google/gemma-2b-it-lora",
      "short_name": "Gemma 2B IT LORA",
      "provider": "google",
      "task": "text-generation",
      "params": "2B",
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "balanced",
      "description": "This is a Gemma-2B base model that Cloudflare dedicates for inference with LoRA adapters. Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-02 00:19:34.669"
    },
    {
      "id": "@cf/huggingface/distilbert-sst-2-int8",
      "short_name": "Distilbert Sst 2 INT8",
      "provider": "huggingface",
      "task": "text-classification",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Distilled BERT model that was finetuned on SST-2 for sentiment classification",
      "docs_url": "https://huggingface.co/Intel/distilbert-base-uncased-finetuned-sst-2-english-int8-static",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-09-25 19:21:11.898"
    },
    {
      "id": "@cf/ibm-granite/granite-4.0-h-micro",
      "short_name": "Granite 4.0 H Micro",
      "provider": "ibm-granite",
      "task": "text-generation",
      "params": null,
      "context_length": 131000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Granite 4.0 instruct models deliver strong performance across benchmarks, achieving industry-leading results in key agentic tasks like instruction following and function calling. These efficiencies make the models well-suited for a wide range of use cases like retrieval-augmented generation (RAG), multi-agent workflows, and edge deployments.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-10-07 18:46:29.436"
    },
    {
      "id": "@cf/leonardo/lucid-origin",
      "short_name": "Lucid Origin",
      "provider": "leonardo",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Lucid Origin from Leonardo.AI is their most adaptable and prompt-responsive model to date. Whether you're generating images with sharp graphic design, stunning full-HD renders, or highly specific creative direction, it adheres closely to your prompts, renders text with accuracy, and supports a wide array of visual styles and aesthetics – from stylized concept art to crisp product mockups.\n",
      "docs_url": null,
      "licence_url": "https://leonardo.ai/terms-of-service/",
      "flagship": false,
      "created_at": "2025-08-25 19:21:28.770"
    },
    {
      "id": "@cf/leonardo/phoenix-1.0",
      "short_name": "Phoenix 1.0",
      "provider": "leonardo",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Phoenix 1.0 is a model by Leonardo.Ai that generates images with exceptional prompt adherence and coherent text.",
      "docs_url": null,
      "licence_url": "https://leonardo.ai/terms-of-service/",
      "flagship": false,
      "created_at": "2025-08-25 18:12:18.073"
    },
    {
      "id": "@cf/llava-hf/llava-1.5-7b-hf",
      "short_name": "Llava 1.5 7B HF",
      "provider": "llava-hf",
      "task": "image-to-text",
      "params": "7B",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": true,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "fast",
      "description": "LLaVA is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on GPT-generated multimodal instruction-following data. It is an auto-regressive language model, based on the transformer architecture.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-05-01 18:00:39.971"
    },
    {
      "id": "@cf/lykon/dreamshaper-8-lcm",
      "short_name": "Dreamshaper 8 Lcm",
      "provider": "lykon",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Stable Diffusion model that has been fine-tuned to be better at photorealism without sacrificing range.",
      "docs_url": "https://huggingface.co/Lykon/DreamShaper",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 17:40:38.881"
    },
    {
      "id": "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
      "short_name": "Llama 3.3 70B Instruct FP8 Fast",
      "provider": "meta",
      "task": "text-generation",
      "params": "70B",
      "context_length": 24000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Llama 3.3 70B quantized to fp8 precision, optimized to be faster.",
      "docs_url": null,
      "licence_url": "https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/LICENSE",
      "flagship": true,
      "created_at": "2024-12-06 17:09:18.338"
    },
    {
      "id": "@cf/meta/llama-4-scout-17b-16e-instruct",
      "short_name": "Llama 4 Scout 17B 16E Instruct",
      "provider": "meta",
      "task": "text-generation",
      "params": "17B",
      "context_length": 131000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Meta's Llama 4 Scout is a 17 billion parameter model with 16 experts that is natively multimodal. These models leverage a mixture-of-experts architecture to offer industry-leading performance in text and image understanding.",
      "docs_url": null,
      "licence_url": "https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE",
      "flagship": true,
      "created_at": "2025-04-05 20:25:56.137"
    },
    {
      "id": "@cf/meta/llama-3.2-11b-vision-instruct",
      "short_name": "Llama 3.2 11B Vision Instruct",
      "provider": "meta",
      "task": "text-generation",
      "params": "11B",
      "context_length": 128000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": true,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "flagship",
      "description": " The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.",
      "docs_url": null,
      "licence_url": "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE",
      "flagship": true,
      "created_at": "2024-09-25 05:36:04.547"
    },
    {
      "id": "@cf/meta/llama-3.1-8b-instruct-awq",
      "short_name": "Llama 3.1 8B Instruct AWQ",
      "provider": "meta",
      "task": "text-generation",
      "params": "8B",
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Quantized (int4) generative text model with 8 billion parameters from Meta.\n",
      "docs_url": null,
      "licence_url": "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE",
      "flagship": true,
      "created_at": "2024-07-25 17:46:04.304"
    },
    {
      "id": "@cf/meta/llama-guard-3-8b",
      "short_name": "Llama Guard 3 8B",
      "provider": "meta",
      "task": "text-generation",
      "params": "8B",
      "context_length": 131072,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "flagship",
      "description": "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM – it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-01-22 23:26:23.495"
    },
    {
      "id": "@cf/meta/llama-3-8b-instruct",
      "short_name": "Llama 3 8B Instruct",
      "provider": "meta",
      "task": "text-generation",
      "params": "8B",
      "context_length": 7968,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.",
      "docs_url": "https://llama.meta.com",
      "licence_url": "https://llama.meta.com/llama3/license/#",
      "flagship": false,
      "created_at": "2024-04-18 20:31:47.273"
    },
    {
      "id": "@cf/meta/llama-3-8b-instruct-awq",
      "short_name": "Llama 3 8B Instruct AWQ",
      "provider": "meta",
      "task": "text-generation",
      "params": "8B",
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Quantized (int4) generative text model with 8 billion parameters from Meta.",
      "docs_url": "https://llama.meta.com",
      "licence_url": "https://llama.meta.com/llama3/license/#",
      "flagship": false,
      "created_at": "2024-05-09 23:32:47.584"
    },
    {
      "id": "@cf/meta/llama-3.1-8b-instruct-fp8",
      "short_name": "Llama 3.1 8B Instruct FP8",
      "provider": "meta",
      "task": "text-generation",
      "params": "8B",
      "context_length": 32000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Llama 3.1 8B quantized to FP8 precision",
      "docs_url": null,
      "licence_url": "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE",
      "flagship": false,
      "created_at": "2024-07-25 17:28:43.328"
    },
    {
      "id": "@cf/meta/llama-2-7b-chat-fp16",
      "short_name": "Llama 2 7B Chat FP16",
      "provider": "meta",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Full precision (fp16) generative text model with 7 billion parameters from Meta",
      "docs_url": "https://ai.meta.com/llama/",
      "licence_url": "https://ai.meta.com/resources/models-and-libraries/llama-downloads/",
      "flagship": false,
      "created_at": "2023-11-07 11:54:20.229"
    },
    {
      "id": "@cf/meta/llama-2-7b-chat-int8",
      "short_name": "Llama 2 7B Chat INT8",
      "provider": "meta",
      "task": "text-generation",
      "params": "7B",
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Quantized (int8) generative text model with 7 billion parameters from Meta",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-09-25 19:21:11.898"
    },
    {
      "id": "@cf/meta/llama-3.2-3b-instruct",
      "short_name": "Llama 3.2 3B Instruct",
      "provider": "meta",
      "task": "text-generation",
      "params": "3B",
      "context_length": 80000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.",
      "docs_url": null,
      "licence_url": "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE",
      "flagship": false,
      "created_at": "2024-09-25 20:05:43.986"
    },
    {
      "id": "@cf/meta/llama-3.2-1b-instruct",
      "short_name": "Llama 3.2 1B Instruct",
      "provider": "meta",
      "task": "text-generation",
      "params": "1B",
      "context_length": 60000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.",
      "docs_url": null,
      "licence_url": "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE",
      "flagship": false,
      "created_at": "2024-09-25 21:36:32.050"
    },
    {
      "id": "@cf/meta/m2m100-1.2b",
      "short_name": "M2m100 1.2B",
      "provider": "meta",
      "task": "translation",
      "params": "1.2B",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many multilingual translation",
      "docs_url": "https://github.com/facebookresearch/fairseq/tree/main/examples/m2m_100",
      "licence_url": "https://github.com/facebookresearch/fairseq/blob/main/LICENSE",
      "flagship": false,
      "created_at": "2023-09-25 19:21:11.898"
    },
    {
      "id": "@cf/meta-llama/llama-2-7b-chat-hf-lora",
      "short_name": "Llama 2 7B Chat HF LORA",
      "provider": "meta-llama",
      "task": "text-generation",
      "params": "7B",
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "fast",
      "description": "This is a Llama2 base model that Cloudflare dedicated for inference with LoRA adapters. Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. ",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-02 00:17:18.579"
    },
    {
      "id": "@cf/microsoft/resnet-50",
      "short_name": "Resnet 50",
      "provider": "microsoft",
      "task": "image-classification",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "50 layers deep image classification CNN trained on more than 1M images from ImageNet",
      "docs_url": "https://www.microsoft.com/en-us/research/blog/microsoft-vision-model-resnet-50-combines-web-scale-data-and-multi-task-learning-to-achieve-state-of-the-art/",
      "licence_url": null,
      "flagship": true,
      "created_at": "2023-09-25 19:21:11.898"
    },
    {
      "id": "@cf/microsoft/phi-2",
      "short_name": "Phi 2",
      "provider": "microsoft",
      "task": "text-generation",
      "params": null,
      "context_length": 2048,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Phi-2 is a Transformer-based model with a next-word prediction objective, trained on 1.4T tokens from multiple passes on a mixture of Synthetic and Web datasets for NLP and coding.",
      "docs_url": "https://huggingface.co/microsoft/phi-2",
      "licence_url": null,
      "flagship": true,
      "created_at": "2024-02-27 18:26:21.126"
    },
    {
      "id": "@cf/mistral/mistral-7b-instruct-v0.1",
      "short_name": "Mistral 7B Instruct V0.1",
      "provider": "mistral",
      "task": "text-generation",
      "params": "7B",
      "context_length": 2824,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "fast",
      "description": "Instruct fine-tuned version of the Mistral-7b generative text model with 7 billion parameters",
      "docs_url": "https://mistral.ai/news/announcing-mistral-7b/",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-11-07 11:54:20.229"
    },
    {
      "id": "@cf/mistral/mistral-7b-instruct-v0.2-lora",
      "short_name": "Mistral 7B Instruct V0.2 LORA",
      "provider": "mistral",
      "task": "text-generation",
      "params": "7B",
      "context_length": 15000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "fast",
      "description": "The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.2.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-01 22:14:40.529"
    },
    {
      "id": "@hf/mistral/mistral-7b-instruct-v0.2",
      "short_name": "Mistral 7B Instruct V0.2",
      "provider": "mistral",
      "task": "text-generation",
      "params": "7B",
      "context_length": 3072,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "fast",
      "description": "The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.2. Mistral-7B-v0.2 has the following changes compared to Mistral-7B-v0.1: 32k context window (vs 8k context in v0.1), rope-theta = 1e6, and no Sliding-Window Attention.",
      "docs_url": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-02 13:00:59.244"
    },
    {
      "id": "@cf/mistralai/mistral-small-3.1-24b-instruct",
      "short_name": "Mistral Small 3.1 24B Instruct",
      "provider": "mistralai",
      "task": "text-generation",
      "params": "24B",
      "context_length": 128000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Building upon Mistral Small 3 (2501), Mistral Small 3.1 (2503) adds state-of-the-art vision understanding and enhances long context capabilities up to 128k tokens without compromising text performance. With 24 billion parameters, this model achieves top-tier capabilities in both text and vision tasks.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-03-18 03:28:37.890"
    },
    {
      "id": "@cf/moonshotai/kimi-k2.6",
      "short_name": "Kimi K2.6",
      "provider": "moonshotai",
      "task": "text-generation",
      "params": null,
      "context_length": 262144,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Kimi K2.6 is a frontier-scale open-source 1T parameter model with a 262.1k context window, multi-turn tool calling, vision inputs, and structured outputs for agentic workloads.",
      "docs_url": null,
      "licence_url": "https://huggingface.co/moonshotai/Kimi-K2.6/blob/main/LICENSE",
      "flagship": true,
      "created_at": "2026-04-20 01:40:35.001"
    },
    {
      "id": "@cf/moonshotai/kimi-k2.5",
      "short_name": "Kimi K2.5",
      "provider": "moonshotai",
      "task": "text-generation",
      "params": null,
      "context_length": 256000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "Kimi K2.5 is a frontier-scale open-source model with a 256k context window, multi-turn tool calling, vision inputs, and structured outputs for agentic workloads.",
      "docs_url": null,
      "licence_url": "https://github.com/MoonshotAI/Kimi-K2.5/blob/master/LICENSE",
      "flagship": false,
      "created_at": "2026-02-02 21:11:49.874"
    },
    {
      "id": "@cf/myshell-ai/melotts",
      "short_name": "Melotts",
      "provider": "myshell-ai",
      "task": "text-to-speech",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "MeloTTS is a high-quality multi-lingual text-to-speech library by MyShell.ai.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-07-19 15:51:04.819"
    },
    {
      "id": "@hf/nexusflow/starling-lm-7b-beta",
      "short_name": "Starling Lm 7B Beta",
      "provider": "nexusflow",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "We introduce Starling-LM-7B-beta, an open large language model (LLM) trained by Reinforcement Learning from AI Feedback (RLAIF). Starling-LM-7B-beta is trained from Openchat-3.5-0106 with our new reward model Nexusflow/Starling-RM-34B and policy optimization method Fine-Tuning Language Models from Human Preferences (PPO).",
      "docs_url": "https://huggingface.co/Nexusflow/Starling-LM-7B-beta",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-01 23:49:31.797"
    },
    {
      "id": "@hf/nousresearch/hermes-2-pro-mistral-7b",
      "short_name": "Hermes 2 Pro Mistral 7B",
      "provider": "nousresearch",
      "task": "text-generation",
      "params": "7B",
      "context_length": 24000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Hermes 2 Pro on Mistral 7B is the new flagship 7B Hermes! Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house.",
      "docs_url": "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-01 23:45:53.800"
    },
    {
      "id": "@cf/nvidia/nemotron-3-120b-a12b",
      "short_name": "Nemotron 3 120B",
      "provider": "nvidia",
      "task": "text-generation",
      "params": "120B",
      "context_length": 256000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "NVIDIA Nemotron 3 Super is a hybrid MoE model with leading accuracy for multi-agent applications and specialized agentic AI systems.",
      "docs_url": null,
      "licence_url": "https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-nemotron-open-model-license/",
      "flagship": true,
      "created_at": "2026-02-24 23:22:47.215"
    },
    {
      "id": "@cf/openai/whisper-large-v3-turbo",
      "short_name": "Whisper Large V3 Turbo",
      "provider": "openai",
      "task": "automatic-speech-recognition",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. ",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2024-05-22 00:02:18.656"
    },
    {
      "id": "@cf/openai/whisper",
      "short_name": "Whisper",
      "provider": "openai",
      "task": "automatic-speech-recognition",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multitasking model that can perform multilingual speech recognition, speech translation, and language identification.",
      "docs_url": "https://openai.com/research/whisper",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-09-25 19:21:11.898"
    },
    {
      "id": "@cf/openai/whisper-tiny-en",
      "short_name": "Whisper Tiny En",
      "provider": "openai",
      "task": "automatic-speech-recognition",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "fast",
      "description": "Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalize to many datasets and domains without the need for fine-tuning. This is the English-only version of the Whisper Tiny model which was trained on the task of speech recognition.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-04-22 20:59:02.731"
    },
    {
      "id": "@cf/openai/gpt-oss-120b",
      "short_name": "Gpt OSS 120B",
      "provider": "openai",
      "task": "text-generation",
      "params": "120B",
      "context_length": 128000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "OpenAI’s open-weight models designed for powerful reasoning, agentic tasks, and versatile developer use cases – gpt-oss-120b is for production, general purpose, high reasoning use-cases.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-08-05 10:27:29.131"
    },
    {
      "id": "@cf/openai/gpt-oss-20b",
      "short_name": "Gpt OSS 20B",
      "provider": "openai",
      "task": "text-generation",
      "params": "20B",
      "context_length": 128000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "OpenAI’s open-weight models designed for powerful reasoning, agentic tasks, and versatile developer use cases – gpt-oss-20b is for lower latency, and local or specialized use-cases.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-08-05 10:49:53.265"
    },
    {
      "id": "@cf/openchat/openchat-3.5-0106",
      "short_name": "Openchat 3.5 0106",
      "provider": "openchat",
      "task": "text-generation",
      "params": null,
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "OpenChat is an innovative library of open-source language models, fine-tuned with C-RLFT - a strategy inspired by offline reinforcement learning.",
      "docs_url": "https://huggingface.co/openchat/openchat-3.5-0106",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:20:39.169"
    },
    {
      "id": "@cf/pfnet/plamo-embedding-1b",
      "short_name": "Plamo Embedding 1B",
      "provider": "pfnet",
      "task": "text-embeddings",
      "params": "1B",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "fast",
      "description": "PLaMo-Embedding-1B is a Japanese text embedding model developed by Preferred Networks, Inc.\n\nIt can convert Japanese text input into numerical vectors and can be used for a wide range of applications, including information retrieval, text classification, and clustering.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2025-09-24 18:42:05.576"
    },
    {
      "id": "@cf/pipecat-ai/smart-turn-v2",
      "short_name": "Smart Turn V2",
      "provider": "pipecat-ai",
      "task": "dumb-pipe",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "An open source, community-driven, native audio turn detection model in 2nd version",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2025-08-04 10:08:04.219"
    },
    {
      "id": "@cf/qwen/qwen3-embedding-0.6b",
      "short_name": "Qwen3 Embedding 0.6B",
      "provider": "qwen",
      "task": "text-embeddings",
      "params": "0.6B",
      "context_length": 8192,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "The Qwen3 Embedding model series is the latest proprietary model of the Qwen family, specifically designed for text embedding and ranking tasks. ",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-06-18 20:23:22.086"
    },
    {
      "id": "@cf/qwen/qwen2.5-coder-32b-instruct",
      "short_name": "Qwen2.5 Coder 32B Instruct",
      "provider": "qwen",
      "task": "text-generation",
      "params": "32B",
      "context_length": 32768,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": true
      },
      "tier": "flagship",
      "description": "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). As of now, Qwen2.5-Coder has covered six mainstream model sizes, 0.5, 1.5, 3, 7, 14, 32 billion parameters, to meet the needs of different developers. Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-02-27 00:31:43.829"
    },
    {
      "id": "@cf/qwen/qwq-32b",
      "short_name": "QwQ 32B",
      "provider": "qwen",
      "task": "text-generation",
      "params": "32B",
      "context_length": 24000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": true,
        "streaming": true,
        "lora": true
      },
      "tier": "flagship",
      "description": "QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks, especially hard problems. QwQ-32B is the medium-sized reasoning model, which is capable of achieving competitive performance against state-of-the-art reasoning models, e.g., DeepSeek-R1, o1-mini.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-03-05 21:52:40.974"
    },
    {
      "id": "@cf/qwen/qwen3-30b-a3b-fp8",
      "short_name": "Qwen3 30B FP8",
      "provider": "qwen",
      "task": "text-generation",
      "params": "30B",
      "context_length": 32768,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "Qwen3 is the latest generation of large language models in Qwen series, offering a comprehensive suite of dense and mixture-of-experts (MoE) models. Built upon extensive training, Qwen3 delivers groundbreaking advancements in reasoning, instruction-following, agent capabilities, and multilingual support.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2025-04-30 21:36:10.009"
    },
    {
      "id": "@cf/qwen/qwen1.5-14b-chat-awq",
      "short_name": "Qwen1.5 14B Chat AWQ",
      "provider": "qwen",
      "task": "text-generation",
      "params": "14B",
      "context_length": 7500,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.",
      "docs_url": "https://huggingface.co/qwen/qwen1.5-14b-chat-awq",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:24:45.316"
    },
    {
      "id": "@cf/qwen/qwen1.5-7b-chat-awq",
      "short_name": "Qwen1.5 7B Chat AWQ",
      "provider": "qwen",
      "task": "text-generation",
      "params": "7B",
      "context_length": 20000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.",
      "docs_url": "https://huggingface.co/qwen/qwen1.5-7b-chat-awq",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:24:11.709"
    },
    {
      "id": "@cf/qwen/qwen1.5-1.8b-chat",
      "short_name": "Qwen1.5 1.8B Chat",
      "provider": "qwen",
      "task": "text-generation",
      "params": "1.8B",
      "context_length": 32000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud.",
      "docs_url": "https://huggingface.co/qwen/qwen1.5-1.8b-chat",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:30:31.723"
    },
    {
      "id": "@cf/qwen/qwen1.5-0.5b-chat",
      "short_name": "Qwen1.5 0.5B Chat",
      "provider": "qwen",
      "task": "text-generation",
      "params": "0.5B",
      "context_length": 32000,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud.",
      "docs_url": "https://huggingface.co/qwen/qwen1.5-0.5b-chat",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:23:37.344"
    },
    {
      "id": "@cf/runwayml/stable-diffusion-v1-5-img2img",
      "short_name": "Stable Diffusion V1 5 Img2img",
      "provider": "runwayml",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Stable Diffusion is a latent text-to-image diffusion model capable of generating photo-realistic images. Img2img generates a new image from an input image with Stable Diffusion.",
      "docs_url": "https://huggingface.co/runwayml/stable-diffusion-v1-5",
      "licence_url": "https://github.com/runwayml/stable-diffusion/blob/main/LICENSE",
      "flagship": false,
      "created_at": "2024-02-27 17:32:28.581"
    },
    {
      "id": "@cf/runwayml/stable-diffusion-v1-5-inpainting",
      "short_name": "Stable Diffusion V1 5 Inpainting",
      "provider": "runwayml",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "Stable Diffusion Inpainting is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input, with the extra capability of inpainting the pictures by using a mask.",
      "docs_url": "https://huggingface.co/runwayml/stable-diffusion-inpainting",
      "licence_url": "https://github.com/runwayml/stable-diffusion/blob/main/LICENSE",
      "flagship": false,
      "created_at": "2024-02-27 17:23:57.528"
    },
    {
      "id": "@cf/stabilityai/stable-diffusion-xl-base-1.0",
      "short_name": "Stable Diffusion Xl Base 1.0",
      "provider": "stabilityai",
      "task": "text-to-image",
      "params": null,
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "flagship",
      "description": "Diffusion-based text-to-image generative model by Stability AI. Generates and modifies images based on text prompts.",
      "docs_url": "https://stability.ai/stable-diffusion",
      "licence_url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md",
      "flagship": true,
      "created_at": "2023-11-10 10:54:43.694"
    },
    {
      "id": "@hf/thebloke/llama-2-13b-chat-awq",
      "short_name": "Llama 2 13B Chat AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "13B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "Llama 2 13B Chat AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Llama 2 variant.",
      "docs_url": "https://huggingface.co/TheBloke/Llama-2-13B-chat-AWQ",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-11-24 00:27:15.869"
    },
    {
      "id": "@cf/thebloke/discolm-german-7b-v1-awq",
      "short_name": "Discolm German 7B V1 AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "DiscoLM German 7b is a Mistral-based large language model with a focus on German-language applications. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.",
      "docs_url": "https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-AWQ",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:23:05.178"
    },
    {
      "id": "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
      "short_name": "Mistral 7B Instruct V0.1 AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Mistral 7B Instruct v0.1 AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Mistral variant.",
      "docs_url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-AWQ",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-11-24 00:27:15.869"
    },
    {
      "id": "@hf/thebloke/neural-chat-7b-v3-1-awq",
      "short_name": "Neural Chat 7B V3 1 AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "This model is a fine-tuned 7B parameter LLM on the Intel Gaudi 2 processor from the mistralai/Mistral-7B-v0.1 on the open source dataset Open-Orca/SlimOrca.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-06 18:12:30.722"
    },
    {
      "id": "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
      "short_name": "Openhermes 2.5 Mistral 7B AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "OpenHermes 2.5 Mistral 7B is a state of the art Mistral Fine-tune, a continuation of OpenHermes 2 model, which trained on additional code datasets.",
      "docs_url": null,
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-06 18:04:22.846"
    },
    {
      "id": "@hf/thebloke/zephyr-7b-beta-awq",
      "short_name": "Zephyr 7B Beta AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Zephyr 7B Beta AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Zephyr model variant.",
      "docs_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-AWQ",
      "licence_url": null,
      "flagship": false,
      "created_at": "2023-11-24 00:27:15.869"
    },
    {
      "id": "@hf/thebloke/deepseek-coder-6.7b-base-awq",
      "short_name": "DeepSeek Coder 6.7B Base AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "6.7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese.",
      "docs_url": "https://huggingface.co/TheBloke/deepseek-coder-6.7B-base-AWQ",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-06 18:16:27.183"
    },
    {
      "id": "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
      "short_name": "DeepSeek Coder 6.7B Instruct AWQ",
      "provider": "thebloke",
      "task": "text-generation",
      "params": "6.7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "balanced",
      "description": "Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese.",
      "docs_url": "https://huggingface.co/TheBloke/deepseek-coder-6.7B-instruct-AWQ",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-06 18:18:27.462"
    },
    {
      "id": "@cf/tiiuae/falcon-7b-instruct",
      "short_name": "Falcon 7B Instruct",
      "provider": "tiiuae",
      "task": "text-generation",
      "params": "7B",
      "context_length": 4096,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "Falcon-7B-Instruct is a 7B parameters causal decoder-only model built by TII based on Falcon-7B and finetuned on a mixture of chat/instruct datasets.",
      "docs_url": "https://huggingface.co/tiiuae/falcon-7b-instruct",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:21:15.796"
    },
    {
      "id": "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
      "short_name": "Tinyllama 1.1B Chat V1.0",
      "provider": "tinyllama",
      "task": "text-generation",
      "params": "1.1B",
      "context_length": 2048,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "fast",
      "description": "The TinyLlama project aims to pretrain a 1.1B Llama model on 3 trillion tokens. This is the chat model finetuned on top of TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T.",
      "docs_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:25:37.524"
    },
    {
      "id": "@cf/unum/uform-gen2-qwen-500m",
      "short_name": "Uform Gen2 Qwen 500M",
      "provider": "unum",
      "task": "image-to-text",
      "params": "500M",
      "context_length": null,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": false,
        "vision": true,
        "reasoning": false,
        "streaming": false,
        "lora": false
      },
      "tier": "balanced",
      "description": "UForm-Gen is a small generative vision-language model primarily designed for Image Captioning and Visual Question Answering. The model was pre-trained on the internal image captioning dataset and fine-tuned on public instructions datasets: SVIT, LVIS, VQAs datasets.",
      "docs_url": "https://www.unum.cloud/",
      "licence_url": null,
      "flagship": false,
      "created_at": "2024-02-27 18:28:52.485"
    },
    {
      "id": "@cf/zai-org/glm-4.7-flash",
      "short_name": "GLM 4.7 Flash",
      "provider": "zai-org",
      "task": "text-generation",
      "params": null,
      "context_length": 131072,
      "max_input": null,
      "max_output": null,
      "pricing": null,
      "capabilities": {
        "tools": true,
        "vision": false,
        "reasoning": false,
        "streaming": true,
        "lora": false
      },
      "tier": "flagship",
      "description": "GLM-4.7-Flash is a fast and efficient multilingual text generation model with a 131,072 token context window. Optimized for dialogue, instruction-following, and multi-turn tool calling across 100+ languages.",
      "docs_url": null,
      "licence_url": null,
      "flagship": true,
      "created_at": "2026-01-28 16:04:39.346"
    }
  ]
}