fix(core): make compaction more reliable; add a reserve token buffer so the input window has enough room to compact (#12924)

Co-authored-by: James Lal <james@littlebearlabs.io>
Author: Aiden Cline
Date: 2026-02-10 19:55:22 -06:00
Committed by: GitHub
Parent: 60bdb6e9ba
Commit: 0fd6f365be
16 changed files with 262 additions and 189 deletions
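
Note: the hunk shown below only covers the usage accounting in Session.getUsage; the reserve token buffer mentioned in the title lives elsewhere in this commit. As a rough illustration of the idea only (RESERVE_TOKENS, shouldCompact, usedTokens, contextLimit, and maxOutputTokens are hypothetical names, not taken from this diff), the point is to trigger compaction while the input window still has headroom for the summarization request itself:

const RESERVE_TOKENS = 8_000 // hypothetical headroom kept free for the compaction request

function shouldCompact(usedTokens: number, contextLimit: number, maxOutputTokens: number): boolean {
  // Compact before the window fills up: leave room for the expected output
  // plus a reserve buffer so the summarization call cannot overflow the context.
  return usedTokens >= contextLimit - maxOutputTokens - RESERVE_TOKENS
}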


@@ -4,7 +4,7 @@ import { BusEvent } from "@/bus/bus-event"
import { Bus } from "@/bus"
import { Decimal } from "decimal.js"
import z from "zod"
import { type LanguageModelUsage, type ProviderMetadata } from "ai"
import { type ProviderMetadata } from "ai"
import { Config } from "../config/config"
import { Flag } from "../flag/flag"
import { Identifier } from "../id/id"
@@ -22,6 +22,8 @@ import { Snapshot } from "@/snapshot"
import type { Provider } from "@/provider/provider"
import { PermissionNext } from "@/permission/next"
import { Global } from "@/global"
import type { LanguageModelV2Usage } from "@ai-sdk/provider"
import { iife } from "@/util/iife"
export namespace Session {
const log = Log.create({ service: "session" })
@@ -439,34 +441,58 @@ export namespace Session {
export const getUsage = fn(
z.object({
model: z.custom<Provider.Model>(),
usage: z.custom<LanguageModelUsage>(),
usage: z.custom<LanguageModelV2Usage>(),
metadata: z.custom<ProviderMetadata>().optional(),
}),
(input) => {
const cacheReadInputTokens = input.usage.cachedInputTokens ?? 0
const cacheWriteInputTokens = (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = excludesCachedTokens
? (input.usage.inputTokens ?? 0)
: (input.usage.inputTokens ?? 0) - cacheReadInputTokens - cacheWriteInputTokens
const safe = (value: number) => {
if (!Number.isFinite(value)) return 0
return value
}
const inputTokens = safe(input.usage.inputTokens ?? 0)
const outputTokens = safe(input.usage.outputTokens ?? 0)
const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)
const cacheReadInputTokens = safe(input.usage.cachedInputTokens ?? 0)
const cacheWriteInputTokens = safe(
(input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number,
)
// OpenRouter provides inputTokens as the total count of input tokens (including cached).
// AFAIK other providers (OpenRouter/OpenAI/Gemini etc.) do it the same way e.g. vercel/ai#8794 (comment)
// Anthropic does it differently though - inputTokens doesn't include cached tokens.
// It looks like OpenCode's cost calculation assumes all providers return inputTokens the same way Anthropic does (I'm guessing getUsage logic was originally implemented with anthropic), so it's causing incorrect cost calculation for OpenRouter and others.
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = safe(
excludesCachedTokens ? inputTokens : inputTokens - cacheReadInputTokens - cacheWriteInputTokens,
)
const total = iife(() => {
// Anthropic doesn't provide total_tokens, also ai sdk will vastly undercount if we
// don't compute from components
if (
input.model.api.npm === "@ai-sdk/anthropic" ||
input.model.api.npm === "@ai-sdk/amazon-bedrock" ||
input.model.api.npm === "@ai-sdk/google-vertex/anthropic"
) {
return adjustedInputTokens + outputTokens + cacheReadInputTokens + cacheWriteInputTokens
}
return input.usage.totalTokens
})
const tokens = {
input: safe(adjustedInputTokens),
output: safe(input.usage.outputTokens ?? 0),
reasoning: safe(input.usage?.reasoningTokens ?? 0),
total,
input: adjustedInputTokens,
output: outputTokens,
reasoning: reasoningTokens,
cache: {
write: safe(cacheWriteInputTokens),
read: safe(cacheReadInputTokens),
write: cacheWriteInputTokens,
read: cacheReadInputTokens,
},
}
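
A standalone sketch of the accounting above, with made-up numbers (not taken from any real response), showing why the provider distinction matters: Anthropic/Bedrock-style usage reports inputTokens without cached tokens and lacks a reliable total, so the total is rebuilt from components, while OpenRouter/OpenAI-style usage reports inputTokens including cached tokens, so cache reads and writes are subtracted to get the uncached input:

// Anthropic / Bedrock style: inputTokens already excludes cached tokens.
const a = { inputTokens: 1_000, outputTokens: 200, cacheRead: 5_000, cacheWrite: 300 }
const aInput = a.inputTokens                                        // 1_000, no subtraction
const aTotal = aInput + a.outputTokens + a.cacheRead + a.cacheWrite // 6_500, rebuilt from components

// OpenRouter / OpenAI style: inputTokens is the full prompt, including cached tokens.
const o = { inputTokens: 6_300, outputTokens: 200, cacheRead: 5_000, cacheWrite: 300, totalTokens: 6_500 }
const oInput = o.inputTokens - o.cacheRead - o.cacheWrite           // 1_000, uncached input only
const oTotal = o.totalTokens                                        // provider-reported total used as-is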