{
  "version": "2026-04-27T09:15:01Z",
  "generated_at": "2026-04-27T09:15:01Z",
  "note": "Canonical pricing manifest for the tokcount extension. USD per 1M tokens.",
  "models": {
    "claude-opus-4-7": {
      "provider": "anthropic",
      "input": 15.0,
      "output": 75.0,
      "context": 200000,
      "tokenizer": "claude",
      "vision": true,
      "name": "Claude Opus 4.7"
    },
    "claude-opus-4-7-1m": {
      "provider": "anthropic",
      "input": 30.0,
      "output": 112.5,
      "context": 1000000,
      "tokenizer": "claude",
      "vision": true,
      "name": "Claude Opus 4.7 (1M)"
    },
    "claude-sonnet-4-6": {
      "provider": "anthropic",
      "input": 3.0,
      "output": 15.0,
      "context": 200000,
      "tokenizer": "claude",
      "vision": true,
      "name": "Claude Sonnet 4.6"
    },
    "claude-sonnet-4-6-1m": {
      "provider": "anthropic",
      "input": 6.0,
      "output": 22.5,
      "context": 1000000,
      "tokenizer": "claude",
      "vision": true,
      "name": "Claude Sonnet 4.6 (1M)"
    },
    "claude-haiku-4-5": {
      "provider": "anthropic",
      "input": 0.8,
      "output": 4.0,
      "context": 200000,
      "tokenizer": "claude",
      "vision": true,
      "name": "Claude Haiku 4.5"
    },
    "gpt-5.4": {
      "provider": "openai",
      "input": 2.5,
      "output": 10.0,
      "context": 2000000,
      "tokenizer": "o200k",
      "vision": true,
      "name": "GPT-5.4"
    },
    "gpt-5.4-mini": {
      "provider": "openai",
      "input": 0.15,
      "output": 0.6,
      "context": 400000,
      "tokenizer": "o200k",
      "vision": true,
      "name": "GPT-5.4 Mini"
    },
    "gpt-5.4-nano": {
      "provider": "openai",
      "input": 0.05,
      "output": 0.2,
      "context": 200000,
      "tokenizer": "o200k",
      "vision": false,
      "name": "GPT-5.4 Nano"
    },
    "gpt-4o": {
      "provider": "openai",
      "input": 2.5,
      "output": 10.0,
      "context": 128000,
      "tokenizer": "o200k",
      "vision": true,
      "name": "GPT-4o"
    },
    "gpt-4o-mini": {
      "provider": "openai",
      "input": 0.15,
      "output": 0.6,
      "context": 128000,
      "tokenizer": "o200k",
      "vision": true,
      "name": "GPT-4o Mini"
    },
    "o1": {
      "provider": "openai",
      "input": 15.0,
      "output": 60.0,
      "context": 200000,
      "tokenizer": "o200k",
      "vision": true,
      "name": "o1"
    },
    "o3": {
      "provider": "openai",
      "input": 8.0,
      "output": 32.0,
      "context": 200000,
      "tokenizer": "o200k",
      "vision": true,
      "name": "o3"
    },
    "o3-mini": {
      "provider": "openai",
      "input": 1.1,
      "output": 4.4,
      "context": 200000,
      "tokenizer": "o200k",
      "vision": false,
      "name": "o3 Mini"
    },
    "o4-mini": {
      "provider": "openai",
      "input": 1.2,
      "output": 4.8,
      "context": 200000,
      "tokenizer": "o200k",
      "vision": true,
      "name": "o4 Mini"
    },
    "gemini-3.1-pro": {
      "provider": "google",
      "input": 1.25,
      "output": 5.0,
      "context": 2000000,
      "tokenizer": "gemini",
      "vision": true,
      "name": "Gemini 3.1 Pro"
    },
    "gemini-3.1-flash": {
      "provider": "google",
      "input": 0.075,
      "output": 0.3,
      "context": 1000000,
      "tokenizer": "gemini",
      "vision": true,
      "name": "Gemini 3.1 Flash"
    },
    "gemini-3.1-flash-lite": {
      "provider": "google",
      "input": 0.035,
      "output": 0.15,
      "context": 1000000,
      "tokenizer": "gemini",
      "vision": false,
      "name": "Gemini 3.1 Flash Lite"
    },
    "gemini-2.5-pro": {
      "provider": "google",
      "input": 1.25,
      "output": 5.0,
      "context": 2000000,
      "tokenizer": "gemini",
      "vision": true,
      "name": "Gemini 2.5 Pro"
    },
    "gemini-2.5-flash": {
      "provider": "google",
      "input": 0.075,
      "output": 0.3,
      "context": 1000000,
      "tokenizer": "gemini",
      "vision": true,
      "name": "Gemini 2.5 Flash"
    },
    "grok-4.1-fast-2m": {
      "provider": "xai",
      "input": 2.0,
      "output": 10.0,
      "context": 2000000,
      "tokenizer": "gpt",
      "vision": true,
      "name": "Grok 4.1 Fast (2M)"
    },
    "grok-4": {
      "provider": "xai",
      "input": 3.0,
      "output": 15.0,
      "context": 256000,
      "tokenizer": "gpt",
      "vision": true,
      "name": "Grok 4"
    },
    "grok-3": {
      "provider": "xai",
      "input": 2.0,
      "output": 8.0,
      "context": 128000,
      "tokenizer": "gpt",
      "vision": false,
      "name": "Grok 3"
    },
    "llama-4-scout-10m": {
      "provider": "meta",
      "input": 0.11,
      "output": 0.34,
      "context": 10000000,
      "tokenizer": "llama",
      "vision": true,
      "name": "Llama 4 Scout (10M)"
    },
    "llama-4-maverick": {
      "provider": "meta",
      "input": 0.2,
      "output": 0.6,
      "context": 1000000,
      "tokenizer": "llama",
      "vision": true,
      "name": "Llama 4 Maverick"
    },
    "llama-3.3-70b": {
      "provider": "meta",
      "input": 0.59,
      "output": 0.79,
      "context": 128000,
      "tokenizer": "llama",
      "vision": false,
      "name": "Llama 3.3 70B"
    },
    "deepseek-v3.2": {
      "provider": "deepseek",
      "input": 0.27,
      "output": 1.1,
      "context": 128000,
      "tokenizer": "deepseek",
      "vision": false,
      "name": "DeepSeek V3.2"
    },
    "deepseek-r1": {
      "provider": "deepseek",
      "input": 0.55,
      "output": 2.19,
      "context": 64000,
      "tokenizer": "deepseek",
      "vision": false,
      "name": "DeepSeek R1"
    },
    "mistral-large-2": {
      "provider": "mistral",
      "input": 2.0,
      "output": 6.0,
      "context": 128000,
      "tokenizer": "mistral",
      "vision": false,
      "name": "Mistral Large 2"
    },
    "mistral-medium": {
      "provider": "mistral",
      "input": 0.4,
      "output": 2.0,
      "context": 128000,
      "tokenizer": "mistral",
      "vision": false,
      "name": "Mistral Medium"
    },
    "mistral-small": {
      "provider": "mistral",
      "input": 0.1,
      "output": 0.3,
      "context": 128000,
      "tokenizer": "mistral",
      "vision": false,
      "name": "Mistral Small"
    },
    "command-a": {
      "provider": "cohere",
      "input": 2.5,
      "output": 10.0,
      "context": 256000,
      "tokenizer": "cohere",
      "vision": false,
      "name": "Command A"
    },
    "command-r-plus": {
      "provider": "cohere",
      "input": 2.5,
      "output": 10.0,
      "context": 128000,
      "tokenizer": "cohere",
      "vision": false,
      "name": "Command R+"
    },
    "sonar-huge": {
      "provider": "perplexity",
      "input": 5.0,
      "output": 5.0,
      "context": 128000,
      "tokenizer": "gpt",
      "vision": false,
      "name": "Sonar Huge"
    },
    "sonar-pro": {
      "provider": "perplexity",
      "input": 3.0,
      "output": 15.0,
      "context": 200000,
      "tokenizer": "gpt",
      "vision": false,
      "name": "Sonar Pro"
    }
  },
  "defaultModels": {
    "chat.openai.com": "gpt-5.4",
    "chatgpt.com": "gpt-5.4",
    "claude.ai": "claude-sonnet-4-6",
    "gemini.google.com": "gemini-3.1-pro",
    "aistudio.google.com": "gemini-3.1-pro",
    "poe.com": "claude-sonnet-4-6",
    "perplexity.ai": "sonar-pro",
    "www.perplexity.ai": "sonar-pro",
    "openrouter.ai": "claude-sonnet-4-6",
    "huggingface.co": "llama-3.3-70b",
    "grok.com": "grok-4",
    "x.com": "grok-4",
    "chat.mistral.ai": "mistral-large-2",
    "mistral.ai": "mistral-large-2",
    "chat.deepseek.com": "deepseek-v3.2",
    "coral.cohere.com": "command-a"
  }
}