fix(opencode): add input limit for compaction (#8465)

This commit is contained in:
Brandon Smith
2026-01-15 01:35:16 -06:00
committed by GitHub
parent 92931437c4
commit 8d720f9463
5 changed files with 47 additions and 34 deletions

View File

@@ -361,38 +361,6 @@ export async function CodexAuthPlugin(input: PluginInput): Promise<Hooks> {
}
}
if (!provider.models["gpt-5.2-codex"]) {
const model = {
id: "gpt-5.2-codex",
providerID: "openai",
api: {
id: "gpt-5.2-codex",
url: "https://chatgpt.com/backend-api/codex",
npm: "@ai-sdk/openai",
},
name: "GPT-5.2 Codex",
capabilities: {
temperature: false,
reasoning: true,
attachment: true,
toolcall: true,
input: { text: true, audio: false, image: true, video: false, pdf: false },
output: { text: true, audio: false, image: false, video: false, pdf: false },
interleaved: false,
},
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
limit: { context: 400000, output: 128000 },
status: "active" as const,
options: {},
headers: {},
release_date: "2025-12-18",
variants: {} as Record<string, Record<string, any>>,
family: "gpt-codex",
}
model.variants = ProviderTransform.variants(model)
provider.models["gpt-5.2-codex"] = model
}
// Zero out costs for Codex (included with ChatGPT subscription)
for (const model of Object.values(provider.models)) {
model.cost = {

View File

@@ -47,6 +47,7 @@ export namespace ModelsDev {
.optional(),
limit: z.object({
context: z.number(),
+input: z.number().optional(),
output: z.number(),
}),
modalities: z

View File

@@ -557,6 +557,7 @@ export namespace Provider {
}),
limit: z.object({
context: z.number(),
+input: z.number().optional(),
output: z.number(),
}),
status: z.enum(["alpha", "beta", "deprecated", "active"]),
@@ -619,6 +620,7 @@ export namespace Provider {
},
limit: {
context: model.limit.context,
+input: model.limit.input,
output: model.limit.output,
},
capabilities: {

View File

@@ -34,7 +34,7 @@ export namespace SessionCompaction {
if (context === 0) return false
const count = input.tokens.input + input.tokens.cache.read + input.tokens.output
const output = Math.min(input.model.limit.output, SessionPrompt.OUTPUT_TOKEN_MAX) || SessionPrompt.OUTPUT_TOKEN_MAX
-const usable = context - output
+const usable = input.model.limit.input || context - output
return count > usable
}