binarymax 5 days ago

Does anyone have an API that maintains a list of all model versions for a provider? I hand-maintain OpenAI's prices in a JSON file that I use for cost reporting in my apps (and in an npm package called llm-primitives).

Here's the current version:

    // USD per 1M tokens, keyed by model version (hand-maintained)
    const pricesPerMillion = {
        "o1-2024-12-17": { input: 15.00, output: 60.00 },
        "o1-mini-2024-09-12": { input: 1.10, output: 4.40 },
        "o3-mini-2025-01-31": { input: 1.10, output: 4.40 },
        "gpt-4.5-preview-2025-02-27": { input: 75.00, output: 150.00 },
        "gpt-4o": { input: 5.00, output: 15.00 },
        "gpt-4o-2024-08-06": { input: 2.50, output: 10.00 },
        "gpt-4o-2024-05-13": { input: 5.00, output: 15.00 },
        "gpt-4o-mini": { input: 0.15, output: 0.60 },
        "gpt-4o-mini-2024-07-18": { input: 0.15, output: 0.60 },
        "gpt-4-0613": { input: 30.00, output: 60.00 },
        "gpt-4-turbo-2024-04-09": { input: 10.00, output: 30.00 },
        "gpt-3.5-turbo": { input: 0.003, output: 0.006 },
        "gpt-4.1": { input: 2.00, output: 8.00 },
        "gpt-4.1-2025-04-14": { input: 2.00, output: 8.00 },
        "gpt-4.1-mini": { input: 0.40, output: 1.60 },
        "gpt-4.1-mini-2025-04-14": { input: 0.40, output: 1.60 },
        "gpt-4.1-nano": { input: 0.10, output: 0.40 },
        "gpt-4.1-nano-2025-04-14": { input: 0.10, output: 0.40 },
        "gpt-4o-audio-preview-2024-12-17": { input: 2.50, output: 10.00 },
        "gpt-4o-realtime-preview-2024-12-17": { input: 5.00, output: 20.00 },
        "gpt-4o-mini-audio-preview-2024-12-17": { input: 0.15, output: 0.60 },
        "gpt-4o-mini-realtime-preview-2024-12-17": { input: 0.60, output: 2.40 },
        "o1-pro-2025-03-19": { input: 150.00, output: 600.00 },
        "o3-pro-2025-06-10": { input: 20.00, output: 80.00 },
        "o3-2025-04-16": { input: 2.00, output: 8.00 },
        "o4-mini-2025-04-16": { input: 1.10, output: 4.40 },
        "codex-mini-latest": { input: 1.50, output: 6.00 },
        "gpt-4o-mini-search-preview-2025-03-11": { input: 0.15, output: 0.60 },
        "gpt-4o-search-preview-2025-03-11": { input: 2.50, output: 10.00 },
        "computer-use-preview-2025-03-11": { input: 3.00, output: 12.00 }
    };
I would love to replace this with an API call.
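For illustration, here's roughly what I picture the replacement looking like, assuming some pricing endpoint existed. The URL and the response shape below are made up, and it falls back to the hand-maintained table when the fetch fails:

    // Sketch: fetch prices from a hypothetical endpoint, fall back to the local table.
    // PRICING_URL and the { model: { input, output } } response shape are assumptions,
    // not a real API.
    const PRICING_URL = "https://example.com/v1/model-prices";

    async function loadPricesPerMillion() {
        try {
            const res = await fetch(PRICING_URL);
            if (!res.ok) throw new Error(`HTTP ${res.status}`);
            return await res.json(); // expected shape: { "gpt-4o": { input: 5.00, output: 15.00 }, ... }
        } catch {
            return pricesPerMillion; // fall back to the hand-maintained table above
        }
    }

    // Cost of one request in USD, given token counts and a prices table.
    function costUSD(prices, model, inputTokens, outputTokens) {
        const p = prices[model];
        if (!p) throw new Error(`Unknown model: ${model}`);
        return (inputTokens * p.input + outputTokens * p.output) / 1_000_000;
    }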
urbandw311er 5 days ago | parent | next [-]

Check out the source code of the Vercel AI SDK. I've noticed that they broker calls out to various LLMs and seem to return the cost as part of the response, so I'm thinking this data could well be in there somewhere. Away from my desk right now, so I can't check.
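Even if the SDK only surfaces token usage rather than a dollar figure, the table upthread closes the gap. A minimal sketch, with the usage field names assumed (they vary across SDK versions, so check what your version actually returns):

    // Sketch: turn a response's token usage into dollars using the price table above.
    // The usage shape (inputTokens/outputTokens) is an assumption, not the SDK's
    // documented field names.
    function costFromUsage(prices, model, usage) {
        const p = prices[model];
        if (!p) return null; // model not in the table yet
        return (usage.inputTokens * p.input + usage.outputTokens * p.output) / 1_000_000;
    }

    // e.g. costFromUsage(pricesPerMillion, "gpt-4o-mini", { inputTokens: 1200, outputTokens: 300 })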

stogot 5 days ago | parent [-]

I do this with other tools:

1. Pull some large tech company's open-source tool's JS file

2. Extract an internal JSON blob that contains otherwise hard-to-get information

3. Parse it and use what I need from it in my tool
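A rough sketch of that flow in JS, with the bundle URL and the window.__DATA__ marker entirely hypothetical (every site embeds its blob differently, so the regex has to be adapted per target):

    // Sketch: pull a bundled JS file and pluck an embedded JSON blob out of it.
    // BUNDLE_URL and the window.__DATA__ marker are hypothetical; JSON.parse also
    // assumes the blob is strict JSON rather than a looser JS object literal.
    const BUNDLE_URL = "https://example.com/assets/app.js";

    async function extractEmbeddedJSON() {
        const js = await (await fetch(BUNDLE_URL)).text();
        const match = js.match(/window\.__DATA__\s*=\s*(\{.*?\});/s);
        if (!match) throw new Error("no embedded JSON blob found");
        return JSON.parse(match[1]);
    }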

zerocool0101 5 days ago | parent | prev [-]

This is a snippet of the structure of the JSON file for this website, if it helps:

    { "provider_id": 6, "provider": 7, "input_price_per_1m_tokens": 8, "output_price_per_1m_tokens": 9, "response_time_ms": 10, "actual_cost_usd": 11, "input_cost_per_word_usd": 12, "output_cost_per_word_usd": 13, "has_tiered_pricing": 14 },
    "anthropic:claude-opus-4", "Anthropic Claude Opus 4", 15, 75, 1443.85168793832, 0.045, 0.00006, 0.0003, false,

    { "provider_id": 16, "provider": 17, "input_price_per_1m_tokens": 18, "output_price_per_1m_tokens": 8, "response_time_ms": 19, "actual_cost_usd": 20, "input_cost_per_word_usd": 21, "output_cost_per_word_usd": 12, "has_tiered_pricing": 14 },
    "anthropic:claude-sonnet-4", "Anthropic Claude Sonnet 4", 3, 1568.72692800385, 0.009, 0.000012,

    { "provider_id": 23, "provider": 24, "input_price_per_1m_tokens": 25, "output_price_per_1m_tokens": 26, "response_time_ms": 27, "actual_cost_usd": 28, "input_cost_per_word_usd": 29, "output_cost_per_word_usd": 30, "has_tiered_pricing": 14 },
    "anthropic:claude-haiku-3.5", "Anthropic Claude Haiku 3.5", 0.8, 4, 2141.1094386851, 0.0024, 0.0000032, 0.000016,

    { "provider_id": 32, "provider": 33, "input_price_per_1m_tokens": 8, "output_price_per_1m_tokens": 9, "response_time_ms": 34, "actual_cost_usd": 11, "input_cost_per_word_usd": 12, "output_cost_per_word_usd": 13, "has_tiered_pricing": 14 },
    "anthropic:claude-opus-3", "Anthropic Claude Opus 3", 2538.34107347902,

    { "provider_id": 36, "provider": 37, "input_price_per_1m_tokens": 18, "output_price_per_1m_tokens": 8, "response_time_ms": 38, "actual_cost_usd": 20, "input_cost_per_word_usd": 21, "output_cost_per_word_usd": 12, "has_tiered_pricing": 14 },
    "anthropic:claude-sonnet-3.7", "Anthropic Claude Sonnet 3.7", 2513.9738537193,

    { "provider_id": 40, "provider": 41, "input_price_per_1m_tokens": 42, "output_price_per_1m_tokens": 43, "response_time_ms": 44, "actual_cost_usd": 45, "input_cost_per_word_usd": 46, "output_cost_per_word_usd": 47, "has_tiered_pricing": 14 },
    "anthropic:claude-haiku-3", "Anthropic Claude Haiku 3", 0.25, 1.25, 2874.71054013884, 0.00075, 0.000001, 0.000005,

    { "provider_id": 49, "provider": 50, "input_price_per_1m_tokens": 51, "output_price_per_1m_tokens": 52, "response_time_ms": 53, "actual_cost_usd": 54, "input_cost_per_word_usd": 55, "output_cost_per_word_usd": 56, "has_tiered_pricing": 14 },
    "open-ai:open-ai-gpt-4.1-mini", "Open AI Open AI GPT-4.1-mini", 0.4, 1.6, 2903.77470624506, 0.001, 0.0000016, 0.0000064,

    { "provider_id": 58, "provider": 59, "input_price_per_1m_tokens": 60, "output_price_per_1m_tokens": 51, "response_time_ms": 61, "actual_cost_usd": 62, "input_cost_per_word_usd": 63, "output_cost_per_word_usd": 55, "has_tiered_pricing": 14 },
    "open-ai:open-ai-gpt-4.1-nano", "Open AI Open AI GPT-4.1-nano", 0.1, 2650.13976342621, 0.00025, 4e-7,

    { "provider_id": 65, "provider": 66, "input_price_per_1m_tokens": 9, "output_price_per_1m_tokens": 67, "response_time_ms": 68, "actual_cost_usd": 69, "input_cost_per_word_usd": 13, "output_cost_per_word_usd": 70, "has_tiered_pricing": 14 },
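That looks like the index-referenced flat serialization some Remix-style loaders emit: each record is a template whose values are indices into one shared array. If that reading is right, resolving a record is just a lookup. A minimal sketch, assuming `flat` is the whole deserialized array:

    // Sketch: resolve one index-referenced record against the flat payload array.
    // Assumes the encoding is "object values are indices into the same flat array",
    // which is a guess based on how the snippet above is shaped.
    function resolveRecord(flat, template) {
        const out = {};
        for (const [key, idx] of Object.entries(template)) {
            out[key] = flat[idx];
        }
        return out;
    }

    // e.g. resolveRecord(flat, { provider_id: 6, provider: 7, input_price_per_1m_tokens: 8 })
    // -> { provider_id: "anthropic:claude-opus-4", provider: "Anthropic Claude Opus 4", input_price_per_1m_tokens: 15 }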