Skip to content

Commit 5aac858

Browse files
authored
🤖 fix: correct GPT-5.2 context window to 272k tokens (#1223)
GPT-5.2 and GPT-5.2 Pro have a 272k token context window, not 400k. This matches the documented limits for the GPT-5.x model family. --- _Generated with `mux` • Model: `mux-gateway:anthropic/claude-opus-4-5` • Thinking: `high`_
1 parent 295745f commit 5aac858

File tree

3 files changed: +13 additions, −6 deletions

3 files changed: +13 additions, −6 deletions

src/common/utils/tokens/modelStats.test.ts

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,13 @@ describe("getModelStats", () => {
2323
expect(stats?.max_input_tokens).toBe(400000);
2424
expect(stats?.input_cost_per_token).toBe(0.000015);
2525
});
26+
27+
test("models-extra.ts should override models.json", () => {
28+
// gpt-5.2 exists in both files - models-extra.ts has correct 272k, models.json has incorrect 400k
29+
const stats = getModelStats("openai:gpt-5.2");
30+
expect(stats).not.toBeNull();
31+
expect(stats?.max_input_tokens).toBe(272000); // models-extra.ts override
32+
});
2633
});
2734

2835
describe("ollama model lookups with cloud suffix", () => {

src/common/utils/tokens/modelStats.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -96,17 +96,17 @@ export function getModelStats(modelString: string): ModelStats | null {
9696
const normalized = normalizeGatewayModel(modelString);
9797
const lookupKeys = generateLookupKeys(normalized);
9898

99-
// Try each lookup pattern in main models.json
99+
// Check models-extra.ts first (overrides for models with incorrect upstream data)
100100
for (const key of lookupKeys) {
101-
const data = (modelsData as Record<string, RawModelData>)[key];
101+
const data = (modelsExtra as Record<string, RawModelData>)[key];
102102
if (data && isValidModelData(data)) {
103103
return extractModelStats(data);
104104
}
105105
}
106106

107-
// Fall back to models-extra.ts
107+
// Fall back to main models.json
108108
for (const key of lookupKeys) {
109-
const data = (modelsExtra as Record<string, RawModelData>)[key];
109+
const data = (modelsData as Record<string, RawModelData>)[key];
110110
if (data && isValidModelData(data)) {
111111
return extractModelStats(data);
112112
}

src/common/utils/tokens/models-extra.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ export const modelsExtra: Record<string, ModelData> = {
4545
// Cached input: $0.175/M
4646
// Supports off, low, medium, high, xhigh reasoning levels
4747
"gpt-5.2": {
48-
max_input_tokens: 400000,
48+
max_input_tokens: 272000,
4949
max_output_tokens: 128000,
5050
input_cost_per_token: 0.00000175, // $1.75 per million input tokens
5151
output_cost_per_token: 0.000014, // $14 per million output tokens
@@ -64,7 +64,7 @@ export const modelsExtra: Record<string, ModelData> = {
6464
// $21/M input, $168/M output
6565
// Supports medium, high, xhigh reasoning levels
6666
"gpt-5.2-pro": {
67-
max_input_tokens: 400000,
67+
max_input_tokens: 272000,
6868
max_output_tokens: 128000,
6969
input_cost_per_token: 0.000021, // $21 per million input tokens
7070
output_cost_per_token: 0.000168, // $168 per million output tokens

0 commit comments

Comments (0)