refactor: apply minimal tfcode branding

- Rename packages/opencode → packages/tfcode (directory only)
- Rename bin/opencode → bin/tfcode (CLI binary)
- Rename .opencode → .tfcode (config directory)
- Update package.json name and bin field
- Update config directory path references (.tfcode)
- Keep internal code references as 'opencode' for easy upstream sync
- Keep @opencode-ai/* workspace package names

This minimal branding approach allows clean merges from upstream
opencode repository while providing tfcode branding for users.
Author: Gab
Date: 2026-03-24 13:19:59 +11:00
Parent commit: 8bcbd40e9b
This commit: a8b73fd754
608 changed files with 26 additions and 32 deletions

View File

@@ -0,0 +1,423 @@
import { describe, expect, test } from "bun:test"
import path from "path"
import { SessionCompaction } from "../../src/session/compaction"
import { Token } from "../../src/util/token"
import { Instance } from "../../src/project/instance"
import { Log } from "../../src/util/log"
import { tmpdir } from "../fixture/fixture"
import { Session } from "../../src/session"
import type { Provider } from "../../src/provider/provider"
// Silence logger output for the whole test file.
Log.init({ print: false })
/**
 * Build a minimal Provider.Model for tests.
 *
 * Only the context and output limits are required; the input limit, cost
 * table, and backing npm package can be overridden per test. Everything
 * else (id, capabilities, options) is a fixed stub.
 */
function createModel(opts: {
  context: number
  output: number
  input?: number
  cost?: Provider.Model["cost"]
  npm?: string
}): Provider.Model {
  const freeCost = { input: 0, output: 0, cache: { read: 0, write: 0 } }
  const stub = {
    id: "test-model",
    providerID: "test",
    name: "Test",
    limit: { context: opts.context, input: opts.input, output: opts.output },
    cost: opts.cost ?? freeCost,
    capabilities: {
      toolcall: true,
      attachment: false,
      reasoning: false,
      temperature: true,
      input: { text: true, image: false, audio: false, video: false },
      output: { text: true, image: false, audio: false, video: false },
    },
    api: { npm: opts.npm ?? "@ai-sdk/anthropic" },
    options: {},
  }
  return stub as Provider.Model
}
describe("session.compaction.isOverflow", () => {
test("returns true when token count exceeds usable context", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 100_000, output: 32_000 })
const tokens = { input: 75_000, output: 5_000, reasoning: 0, cache: { read: 0, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
},
})
})
test("returns false when token count within usable context", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 200_000, output: 32_000 })
const tokens = { input: 100_000, output: 10_000, reasoning: 0, cache: { read: 0, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
},
})
})
test("includes cache.read in token count", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 100_000, output: 32_000 })
const tokens = { input: 60_000, output: 10_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
},
})
})
test("respects input limit for input caps", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
const tokens = { input: 271_000, output: 1_000, reasoning: 0, cache: { read: 2_000, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
},
})
})
test("returns false when input/output are within input caps", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
const tokens = { input: 200_000, output: 20_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
},
})
})
test("returns false when output within limit with input caps", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 200_000, input: 120_000, output: 10_000 })
const tokens = { input: 50_000, output: 9_999, reasoning: 0, cache: { read: 0, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
},
})
})
// ─── Bug reproduction tests ───────────────────────────────────────────
// These tests demonstrate that when limit.input is set, isOverflow()
// does not subtract any headroom for the next model response. This means
// compaction only triggers AFTER we've already consumed the full input
// budget, leaving zero room for the next API call's output tokens.
//
// Compare: without limit.input, usable = context - output (reserves space).
// With limit.input, usable = limit.input (reserves nothing).
//
// Related issues: #10634, #8089, #11086, #12621
// Open PRs: #6875, #12924
test("BUG: no headroom when limit.input is set — compaction should trigger near boundary but does not", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Simulate Claude with prompt caching: input limit = 200K, output limit = 32K
const model = createModel({ context: 200_000, input: 200_000, output: 32_000 })
// We've used 198K tokens total. Only 2K under the input limit.
// On the next turn, the full conversation (198K) becomes input,
// plus the model needs room to generate output — this WILL overflow.
const tokens = { input: 180_000, output: 15_000, reasoning: 0, cache: { read: 3_000, write: 0 } }
// count = 180K + 3K + 15K = 198K
// usable = limit.input = 200K (no output subtracted!)
// 198K > 200K = false → no compaction triggered
// WITHOUT limit.input: usable = 200K - 32K = 168K, and 198K > 168K = true ✓
// WITH limit.input: usable = 200K, and 198K > 200K = false ✗
// With 198K used and only 2K headroom, the next turn will overflow.
// Compaction MUST trigger here.
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
},
})
})
test("BUG: without limit.input, same token count correctly triggers compaction", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Same model but without limit.input — uses context - output instead
const model = createModel({ context: 200_000, output: 32_000 })
// Same token usage as above
const tokens = { input: 180_000, output: 15_000, reasoning: 0, cache: { read: 3_000, write: 0 } }
// count = 198K
// usable = context - output = 200K - 32K = 168K
// 198K > 168K = true → compaction correctly triggered
const result = await SessionCompaction.isOverflow({ tokens, model })
expect(result).toBe(true) // ← Correct: headroom is reserved
},
})
})
test("BUG: asymmetry — limit.input model allows 30K more usage before compaction than equivalent model without it", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Two models with identical context/output limits, differing only in limit.input
const withInputLimit = createModel({ context: 200_000, input: 200_000, output: 32_000 })
const withoutInputLimit = createModel({ context: 200_000, output: 32_000 })
// 170K total tokens — well above context-output (168K) but below input limit (200K)
const tokens = { input: 166_000, output: 10_000, reasoning: 0, cache: { read: 5_000, write: 0 } }
const withLimit = await SessionCompaction.isOverflow({ tokens, model: withInputLimit })
const withoutLimit = await SessionCompaction.isOverflow({ tokens, model: withoutInputLimit })
// Both models have identical real capacity — they should agree:
expect(withLimit).toBe(true) // should compact (170K leaves no room for 32K output)
expect(withoutLimit).toBe(true) // correctly compacts (170K > 168K)
},
})
})
test("returns false when model context limit is 0", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 0, output: 32_000 })
const tokens = { input: 100_000, output: 10_000, reasoning: 0, cache: { read: 0, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
},
})
})
test("returns false when compaction.auto is disabled", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
compaction: { auto: false },
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 100_000, output: 32_000 })
const tokens = { input: 75_000, output: 5_000, reasoning: 0, cache: { read: 0, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
},
})
})
})
describe("util.token.estimate", () => {
test("estimates tokens from text (4 chars per token)", () => {
const text = "x".repeat(4000)
expect(Token.estimate(text)).toBe(1000)
})
test("estimates tokens from larger text", () => {
const text = "y".repeat(20_000)
expect(Token.estimate(text)).toBe(5000)
})
test("returns 0 for empty string", () => {
expect(Token.estimate("")).toBe(0)
})
})
describe("session.getUsage", () => {
test("normalizes standard usage to token format", () => {
const model = createModel({ context: 100_000, output: 32_000 })
const result = Session.getUsage({
model,
usage: {
inputTokens: 1000,
outputTokens: 500,
totalTokens: 1500,
},
})
expect(result.tokens.input).toBe(1000)
expect(result.tokens.output).toBe(500)
expect(result.tokens.reasoning).toBe(0)
expect(result.tokens.cache.read).toBe(0)
expect(result.tokens.cache.write).toBe(0)
})
test("extracts cached tokens to cache.read", () => {
const model = createModel({ context: 100_000, output: 32_000 })
const result = Session.getUsage({
model,
usage: {
inputTokens: 1000,
outputTokens: 500,
totalTokens: 1500,
cachedInputTokens: 200,
},
})
expect(result.tokens.input).toBe(800)
expect(result.tokens.cache.read).toBe(200)
})
test("handles anthropic cache write metadata", () => {
const model = createModel({ context: 100_000, output: 32_000 })
const result = Session.getUsage({
model,
usage: {
inputTokens: 1000,
outputTokens: 500,
totalTokens: 1500,
},
metadata: {
anthropic: {
cacheCreationInputTokens: 300,
},
},
})
expect(result.tokens.cache.write).toBe(300)
})
test("does not subtract cached tokens for anthropic provider", () => {
const model = createModel({ context: 100_000, output: 32_000 })
const result = Session.getUsage({
model,
usage: {
inputTokens: 1000,
outputTokens: 500,
totalTokens: 1500,
cachedInputTokens: 200,
},
metadata: {
anthropic: {},
},
})
expect(result.tokens.input).toBe(1000)
expect(result.tokens.cache.read).toBe(200)
})
test("handles reasoning tokens", () => {
const model = createModel({ context: 100_000, output: 32_000 })
const result = Session.getUsage({
model,
usage: {
inputTokens: 1000,
outputTokens: 500,
totalTokens: 1500,
reasoningTokens: 100,
},
})
expect(result.tokens.reasoning).toBe(100)
})
test("handles undefined optional values gracefully", () => {
const model = createModel({ context: 100_000, output: 32_000 })
const result = Session.getUsage({
model,
usage: {
inputTokens: 0,
outputTokens: 0,
totalTokens: 0,
},
})
expect(result.tokens.input).toBe(0)
expect(result.tokens.output).toBe(0)
expect(result.tokens.reasoning).toBe(0)
expect(result.tokens.cache.read).toBe(0)
expect(result.tokens.cache.write).toBe(0)
expect(Number.isNaN(result.cost)).toBe(false)
})
test("calculates cost correctly", () => {
const model = createModel({
context: 100_000,
output: 32_000,
cost: {
input: 3,
output: 15,
cache: { read: 0.3, write: 3.75 },
},
})
const result = Session.getUsage({
model,
usage: {
inputTokens: 1_000_000,
outputTokens: 100_000,
totalTokens: 1_100_000,
},
})
expect(result.cost).toBe(3 + 1.5)
})
test.each(["@ai-sdk/anthropic", "@ai-sdk/amazon-bedrock", "@ai-sdk/google-vertex/anthropic"])(
"computes total from components for %s models",
(npm) => {
const model = createModel({ context: 100_000, output: 32_000, npm })
const usage = {
inputTokens: 1000,
outputTokens: 500,
// These providers typically report total as input + output only,
// excluding cache read/write.
totalTokens: 1500,
cachedInputTokens: 200,
}
if (npm === "@ai-sdk/amazon-bedrock") {
const result = Session.getUsage({
model,
usage,
metadata: {
bedrock: {
usage: {
cacheWriteInputTokens: 300,
},
},
},
})
expect(result.tokens.input).toBe(1000)
expect(result.tokens.cache.read).toBe(200)
expect(result.tokens.cache.write).toBe(300)
expect(result.tokens.total).toBe(2000)
return
}
const result = Session.getUsage({
model,
usage,
metadata: {
anthropic: {
cacheCreationInputTokens: 300,
},
},
})
expect(result.tokens.input).toBe(1000)
expect(result.tokens.cache.read).toBe(200)
expect(result.tokens.cache.write).toBe(300)
expect(result.tokens.total).toBe(2000)
},
)
})

View File

@@ -0,0 +1,170 @@
import { afterEach, beforeEach, describe, expect, test } from "bun:test"
import path from "path"
import { InstructionPrompt } from "../../src/session/instruction"
import { Instance } from "../../src/project/instance"
import { Global } from "../../src/global"
import { tmpdir } from "../fixture/fixture"
describe("InstructionPrompt.resolve", () => {
test("returns empty when AGENTS.md is at project root (already in systemPaths)", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "AGENTS.md"), "# Root Instructions")
await Bun.write(path.join(dir, "src", "file.ts"), "const x = 1")
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const system = await InstructionPrompt.systemPaths()
expect(system.has(path.join(tmp.path, "AGENTS.md"))).toBe(true)
const results = await InstructionPrompt.resolve([], path.join(tmp.path, "src", "file.ts"), "test-message-1")
expect(results).toEqual([])
},
})
})
test("returns AGENTS.md from subdirectory (not in systemPaths)", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "subdir", "AGENTS.md"), "# Subdir Instructions")
await Bun.write(path.join(dir, "subdir", "nested", "file.ts"), "const x = 1")
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const system = await InstructionPrompt.systemPaths()
expect(system.has(path.join(tmp.path, "subdir", "AGENTS.md"))).toBe(false)
const results = await InstructionPrompt.resolve(
[],
path.join(tmp.path, "subdir", "nested", "file.ts"),
"test-message-2",
)
expect(results.length).toBe(1)
expect(results[0].filepath).toBe(path.join(tmp.path, "subdir", "AGENTS.md"))
},
})
})
test("doesn't reload AGENTS.md when reading it directly", async () => {
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "subdir", "AGENTS.md"), "# Subdir Instructions")
await Bun.write(path.join(dir, "subdir", "nested", "file.ts"), "const x = 1")
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const filepath = path.join(tmp.path, "subdir", "AGENTS.md")
const system = await InstructionPrompt.systemPaths()
expect(system.has(filepath)).toBe(false)
const results = await InstructionPrompt.resolve([], filepath, "test-message-2")
expect(results).toEqual([])
},
})
})
})
describe("InstructionPrompt.systemPaths OPENCODE_CONFIG_DIR", () => {
let originalConfigDir: string | undefined
beforeEach(() => {
originalConfigDir = process.env["OPENCODE_CONFIG_DIR"]
})
afterEach(() => {
if (originalConfigDir === undefined) {
delete process.env["OPENCODE_CONFIG_DIR"]
} else {
process.env["OPENCODE_CONFIG_DIR"] = originalConfigDir
}
})
test("prefers OPENCODE_CONFIG_DIR AGENTS.md over global when both exist", async () => {
await using profileTmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "AGENTS.md"), "# Profile Instructions")
},
})
await using globalTmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "AGENTS.md"), "# Global Instructions")
},
})
await using projectTmp = await tmpdir()
process.env["OPENCODE_CONFIG_DIR"] = profileTmp.path
const originalGlobalConfig = Global.Path.config
;(Global.Path as { config: string }).config = globalTmp.path
try {
await Instance.provide({
directory: projectTmp.path,
fn: async () => {
const paths = await InstructionPrompt.systemPaths()
expect(paths.has(path.join(profileTmp.path, "AGENTS.md"))).toBe(true)
expect(paths.has(path.join(globalTmp.path, "AGENTS.md"))).toBe(false)
},
})
} finally {
;(Global.Path as { config: string }).config = originalGlobalConfig
}
})
test("falls back to global AGENTS.md when OPENCODE_CONFIG_DIR has no AGENTS.md", async () => {
await using profileTmp = await tmpdir()
await using globalTmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "AGENTS.md"), "# Global Instructions")
},
})
await using projectTmp = await tmpdir()
process.env["OPENCODE_CONFIG_DIR"] = profileTmp.path
const originalGlobalConfig = Global.Path.config
;(Global.Path as { config: string }).config = globalTmp.path
try {
await Instance.provide({
directory: projectTmp.path,
fn: async () => {
const paths = await InstructionPrompt.systemPaths()
expect(paths.has(path.join(profileTmp.path, "AGENTS.md"))).toBe(false)
expect(paths.has(path.join(globalTmp.path, "AGENTS.md"))).toBe(true)
},
})
} finally {
;(Global.Path as { config: string }).config = originalGlobalConfig
}
})
test("uses global AGENTS.md when OPENCODE_CONFIG_DIR is not set", async () => {
await using globalTmp = await tmpdir({
init: async (dir) => {
await Bun.write(path.join(dir, "AGENTS.md"), "# Global Instructions")
},
})
await using projectTmp = await tmpdir()
delete process.env["OPENCODE_CONFIG_DIR"]
const originalGlobalConfig = Global.Path.config
;(Global.Path as { config: string }).config = globalTmp.path
try {
await Instance.provide({
directory: projectTmp.path,
fn: async () => {
const paths = await InstructionPrompt.systemPaths()
expect(paths.has(path.join(globalTmp.path, "AGENTS.md"))).toBe(true)
},
})
} finally {
;(Global.Path as { config: string }).config = originalGlobalConfig
}
})
})

View File

@@ -0,0 +1,759 @@
import { afterAll, beforeAll, beforeEach, describe, expect, test } from "bun:test"
import path from "path"
import { tool, type ModelMessage } from "ai"
import z from "zod"
import { LLM } from "../../src/session/llm"
import { Global } from "../../src/global"
import { Instance } from "../../src/project/instance"
import { Provider } from "../../src/provider/provider"
import { ProviderTransform } from "../../src/provider/transform"
import { ModelsDev } from "../../src/provider/models"
import { ProviderID, ModelID } from "../../src/provider/schema"
import { Filesystem } from "../../src/util/filesystem"
import { tmpdir } from "../fixture/fixture"
import type { Agent } from "../../src/agent/agent"
import type { MessageV2 } from "../../src/session/message-v2"
import { SessionID, MessageID } from "../../src/session/schema"
describe("session.llm.hasToolCalls", () => {
test("returns false for empty messages array", () => {
expect(LLM.hasToolCalls([])).toBe(false)
})
test("returns false for messages with only text content", () => {
const messages: ModelMessage[] = [
{
role: "user",
content: [{ type: "text", text: "Hello" }],
},
{
role: "assistant",
content: [{ type: "text", text: "Hi there" }],
},
]
expect(LLM.hasToolCalls(messages)).toBe(false)
})
test("returns true when messages contain tool-call", () => {
const messages = [
{
role: "user",
content: [{ type: "text", text: "Run a command" }],
},
{
role: "assistant",
content: [
{
type: "tool-call",
toolCallId: "call-123",
toolName: "bash",
},
],
},
] as ModelMessage[]
expect(LLM.hasToolCalls(messages)).toBe(true)
})
test("returns true when messages contain tool-result", () => {
const messages = [
{
role: "tool",
content: [
{
type: "tool-result",
toolCallId: "call-123",
toolName: "bash",
},
],
},
] as ModelMessage[]
expect(LLM.hasToolCalls(messages)).toBe(true)
})
test("returns false for messages with string content", () => {
const messages: ModelMessage[] = [
{
role: "user",
content: "Hello world",
},
{
role: "assistant",
content: "Hi there",
},
]
expect(LLM.hasToolCalls(messages)).toBe(false)
})
test("returns true when tool-call is mixed with text content", () => {
const messages = [
{
role: "assistant",
content: [
{ type: "text", text: "Let me run that command" },
{
type: "tool-call",
toolCallId: "call-456",
toolName: "read",
},
],
},
] as ModelMessage[]
expect(LLM.hasToolCalls(messages)).toBe(true)
})
})
// A captured HTTP request made by the code under test against the mock server.
type Capture = {
  url: URL
  headers: Headers
  body: Record<string, unknown>
}
// Shared mock-server state: the Bun server instance, plus a FIFO queue of
// expectations (path suffix to match, canned response to return, and a
// resolver that receives the captured request).
const state = {
  server: null as ReturnType<typeof Bun.serve> | null,
  queue: [] as Array<{ path: string; response: Response; resolve: (value: Capture) => void }>,
}
// Create a promise along with its externally exposed resolver
// (a classic "deferred" helper).
function deferred<T>() {
  let resolve!: (value: T) => void
  const promise = new Promise<T>((res) => {
    resolve = res
  })
  return { promise, resolve }
}
// Queue an expected request on the mock server and return a promise that
// resolves with the captured request once it arrives.
function waitRequest(pathname: string, response: Response) {
  const { promise, resolve } = deferred<Capture>()
  state.queue.push({ path: pathname, response, resolve })
  return promise
}
// Start the mock HTTP server on a random port. Each incoming request is
// matched against the next queued expectation (FIFO order).
beforeAll(() => {
  state.server = Bun.serve({
    port: 0,
    async fetch(req) {
      const next = state.queue.shift()
      if (!next) {
        return new Response("unexpected request", { status: 500 })
      }
      const url = new URL(req.url)
      const body = (await req.json()) as Record<string, unknown>
      // The capture is resolved BEFORE the path check — presumably so a test
      // awaiting the capture fails on its assertions rather than hanging
      // when the request hit the wrong path. TODO confirm intent.
      next.resolve({ url, headers: req.headers, body })
      if (!url.pathname.endsWith(next.path)) {
        return new Response("not found", { status: 404 })
      }
      return next.response
    },
  })
})
beforeEach(() => {
  // Drop any leftover expectations so every test starts with a clean queue.
  state.queue.splice(0, state.queue.length)
})
afterAll(() => {
  // Shut the mock server down if it was ever started.
  const server = state.server
  if (server) {
    server.stop()
  }
})
// Build a ReadableStream of SSE frames mimicking a minimal OpenAI
// chat-completions stream: role chunk, one content chunk with `text`,
// a stop chunk, then the [DONE] sentinel.
function createChatStream(text: string) {
  const frames = [
    { id: "chatcmpl-1", object: "chat.completion.chunk", choices: [{ delta: { role: "assistant" } }] },
    { id: "chatcmpl-1", object: "chat.completion.chunk", choices: [{ delta: { content: text } }] },
    { id: "chatcmpl-1", object: "chat.completion.chunk", choices: [{ delta: {}, finish_reason: "stop" }] },
  ].map((frame) => `data: ${JSON.stringify(frame)}`)
  frames.push("data: [DONE]")
  const bytes = new TextEncoder().encode(frames.join("\n\n") + "\n\n")
  return new ReadableStream<Uint8Array>({
    start(controller) {
      controller.enqueue(bytes)
      controller.close()
    },
  })
}
// Load a provider and one of its models from the shared models-api.json
// fixture; throws with a descriptive message when either is missing.
async function loadFixture(providerID: string, modelID: string) {
  const fixturePath = path.join(import.meta.dir, "../tool/fixtures/models-api.json")
  const providers = await Filesystem.readJson<Record<string, ModelsDev.Provider>>(fixturePath)
  const provider = providers[providerID]
  if (!provider) throw new Error(`Missing provider in fixture: ${providerID}`)
  const model = provider.models[modelID]
  if (!model) throw new Error(`Missing model in fixture: ${modelID}`)
  return { provider, model }
}
// Build a ReadableStream of SSE "data:" frames from arbitrary chunks.
// String chunks pass through verbatim; everything else is JSON-encoded.
// When includeDone is true a trailing [DONE] sentinel frame is appended.
function createEventStream(chunks: unknown[], includeDone = false) {
  const events: string[] = []
  for (const chunk of chunks) {
    events.push(`data: ${typeof chunk === "string" ? chunk : JSON.stringify(chunk)}`)
  }
  if (includeDone) {
    events.push("data: [DONE]")
  }
  const bytes = new TextEncoder().encode(events.join("\n\n") + "\n\n")
  return new ReadableStream<Uint8Array>({
    start(controller) {
      controller.enqueue(bytes)
      controller.close()
    },
  })
}
// Wrap an SSE stream of the given chunks in an HTTP 200 response with the
// text/event-stream content type.
function createEventResponse(chunks: unknown[], includeDone = false) {
  const headers = { "Content-Type": "text/event-stream" }
  return new Response(createEventStream(chunks, includeDone), { status: 200, headers })
}
describe("session.llm.stream", () => {
test("sends temperature, tokens, and reasoning options for openai-compatible models", async () => {
const server = state.server
if (!server) {
throw new Error("Server not initialized")
}
const providerID = "alibaba"
const modelID = "qwen-plus"
const fixture = await loadFixture(providerID, modelID)
const provider = fixture.provider
const model = fixture.model
const request = waitRequest(
"/chat/completions",
new Response(createChatStream("Hello"), {
status: 200,
headers: { "Content-Type": "text/event-stream" },
}),
)
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
enabled_providers: [providerID],
provider: {
[providerID]: {
options: {
apiKey: "test-key",
baseURL: `${server.url.origin}/v1`,
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
const sessionID = SessionID.make("session-test-1")
const agent = {
name: "test",
mode: "primary",
options: {},
permission: [{ permission: "*", pattern: "*", action: "allow" }],
temperature: 0.4,
topP: 0.8,
} satisfies Agent.Info
const user = {
id: MessageID.make("user-1"),
sessionID,
role: "user",
time: { created: Date.now() },
agent: agent.name,
model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
variant: "high",
} satisfies MessageV2.User
const stream = await LLM.stream({
user,
sessionID,
model: resolved,
agent,
system: ["You are a helpful assistant."],
abort: new AbortController().signal,
messages: [{ role: "user", content: "Hello" }],
tools: {},
})
for await (const _ of stream.fullStream) {
}
const capture = await request
const body = capture.body
const headers = capture.headers
const url = capture.url
expect(url.pathname.startsWith("/v1/")).toBe(true)
expect(url.pathname.endsWith("/chat/completions")).toBe(true)
expect(headers.get("Authorization")).toBe("Bearer test-key")
expect(body.model).toBe(resolved.api.id)
expect(body.temperature).toBe(0.4)
expect(body.top_p).toBe(0.8)
expect(body.stream).toBe(true)
const maxTokens = (body.max_tokens as number | undefined) ?? (body.max_output_tokens as number | undefined)
const expectedMaxTokens = ProviderTransform.maxOutputTokens(resolved)
expect(maxTokens).toBe(expectedMaxTokens)
const reasoning = (body.reasoningEffort as string | undefined) ?? (body.reasoning_effort as string | undefined)
expect(reasoning).toBe("high")
},
})
})
test("keeps tools enabled by prompt permissions", async () => {
const server = state.server
if (!server) {
throw new Error("Server not initialized")
}
const providerID = "alibaba"
const modelID = "qwen-plus"
const fixture = await loadFixture(providerID, modelID)
const model = fixture.model
const request = waitRequest(
"/chat/completions",
new Response(createChatStream("Hello"), {
status: 200,
headers: { "Content-Type": "text/event-stream" },
}),
)
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
enabled_providers: [providerID],
provider: {
[providerID]: {
options: {
apiKey: "test-key",
baseURL: `${server.url.origin}/v1`,
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
const sessionID = SessionID.make("session-test-tools")
const agent = {
name: "test",
mode: "primary",
options: {},
permission: [{ permission: "question", pattern: "*", action: "deny" }],
} satisfies Agent.Info
const user = {
id: MessageID.make("user-tools"),
sessionID,
role: "user",
time: { created: Date.now() },
agent: agent.name,
model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
tools: { question: true },
} satisfies MessageV2.User
const stream = await LLM.stream({
user,
sessionID,
model: resolved,
agent,
permission: [{ permission: "question", pattern: "*", action: "allow" }],
system: ["You are a helpful assistant."],
abort: new AbortController().signal,
messages: [{ role: "user", content: "Hello" }],
tools: {
question: tool({
description: "Ask a question",
inputSchema: z.object({}),
execute: async () => ({ output: "" }),
}),
},
})
for await (const _ of stream.fullStream) {
}
const capture = await request
const tools = capture.body.tools as Array<{ function?: { name?: string } }> | undefined
expect(tools?.some((item) => item.function?.name === "question")).toBe(true)
},
})
})
test("sends responses API payload for OpenAI models", async () => {
const server = state.server
if (!server) {
throw new Error("Server not initialized")
}
const source = await loadFixture("openai", "gpt-5.2")
const model = source.model
const responseChunks = [
{
type: "response.created",
response: {
id: "resp-1",
created_at: Math.floor(Date.now() / 1000),
model: model.id,
service_tier: null,
},
},
{
type: "response.output_text.delta",
item_id: "item-1",
delta: "Hello",
logprobs: null,
},
{
type: "response.completed",
response: {
incomplete_details: null,
usage: {
input_tokens: 1,
input_tokens_details: null,
output_tokens: 1,
output_tokens_details: null,
},
service_tier: null,
},
},
]
const request = waitRequest("/responses", createEventResponse(responseChunks, true))
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
enabled_providers: ["openai"],
provider: {
openai: {
name: "OpenAI",
env: ["OPENAI_API_KEY"],
npm: "@ai-sdk/openai",
api: "https://api.openai.com/v1",
models: {
[model.id]: model,
},
options: {
apiKey: "test-openai-key",
baseURL: `${server.url.origin}/v1`,
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const resolved = await Provider.getModel(ProviderID.openai, ModelID.make(model.id))
const sessionID = SessionID.make("session-test-2")
const agent = {
name: "test",
mode: "primary",
options: {},
permission: [{ permission: "*", pattern: "*", action: "allow" }],
temperature: 0.2,
} satisfies Agent.Info
const user = {
id: MessageID.make("user-2"),
sessionID,
role: "user",
time: { created: Date.now() },
agent: agent.name,
model: { providerID: ProviderID.make("openai"), modelID: resolved.id },
variant: "high",
} satisfies MessageV2.User
const stream = await LLM.stream({
user,
sessionID,
model: resolved,
agent,
system: ["You are a helpful assistant."],
abort: new AbortController().signal,
messages: [{ role: "user", content: "Hello" }],
tools: {},
})
for await (const _ of stream.fullStream) {
}
const capture = await request
const body = capture.body
expect(capture.url.pathname.endsWith("/responses")).toBe(true)
expect(body.model).toBe(resolved.api.id)
expect(body.stream).toBe(true)
expect((body.reasoning as { effort?: string } | undefined)?.effort).toBe("high")
const maxTokens = body.max_output_tokens as number | undefined
const expectedMaxTokens = ProviderTransform.maxOutputTokens(resolved)
expect(maxTokens).toBe(expectedMaxTokens)
},
})
})
test("sends messages API payload for Anthropic models", async () => {
const server = state.server
if (!server) {
throw new Error("Server not initialized")
}
const providerID = "anthropic"
const modelID = "claude-3-5-sonnet-20241022"
const fixture = await loadFixture(providerID, modelID)
const provider = fixture.provider
const model = fixture.model
const chunks = [
{
type: "message_start",
message: {
id: "msg-1",
model: model.id,
usage: {
input_tokens: 3,
cache_creation_input_tokens: null,
cache_read_input_tokens: null,
},
},
},
{
type: "content_block_start",
index: 0,
content_block: { type: "text", text: "" },
},
{
type: "content_block_delta",
index: 0,
delta: { type: "text_delta", text: "Hello" },
},
{ type: "content_block_stop", index: 0 },
{
type: "message_delta",
delta: { stop_reason: "end_turn", stop_sequence: null, container: null },
usage: {
input_tokens: 3,
output_tokens: 2,
cache_creation_input_tokens: null,
cache_read_input_tokens: null,
},
},
{ type: "message_stop" },
]
const request = waitRequest("/messages", createEventResponse(chunks))
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
enabled_providers: [providerID],
provider: {
[providerID]: {
options: {
apiKey: "test-anthropic-key",
baseURL: `${server.url.origin}/v1`,
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
const sessionID = SessionID.make("session-test-3")
const agent = {
name: "test",
mode: "primary",
options: {},
permission: [{ permission: "*", pattern: "*", action: "allow" }],
temperature: 0.4,
topP: 0.9,
} satisfies Agent.Info
const user = {
id: MessageID.make("user-3"),
sessionID,
role: "user",
time: { created: Date.now() },
agent: agent.name,
model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
} satisfies MessageV2.User
const stream = await LLM.stream({
user,
sessionID,
model: resolved,
agent,
system: ["You are a helpful assistant."],
abort: new AbortController().signal,
messages: [{ role: "user", content: "Hello" }],
tools: {},
})
for await (const _ of stream.fullStream) {
}
const capture = await request
const body = capture.body
expect(capture.url.pathname.endsWith("/messages")).toBe(true)
expect(body.model).toBe(resolved.api.id)
expect(body.max_tokens).toBe(ProviderTransform.maxOutputTokens(resolved))
expect(body.temperature).toBe(0.4)
expect(body.top_p).toBe(0.9)
},
})
})
test("sends Google API payload for Gemini models", async () => {
const server = state.server
if (!server) {
throw new Error("Server not initialized")
}
const providerID = "google"
const modelID = "gemini-2.5-flash"
const fixture = await loadFixture(providerID, modelID)
const provider = fixture.provider
const model = fixture.model
const pathSuffix = `/v1beta/models/${model.id}:streamGenerateContent`
const chunks = [
{
candidates: [
{
content: {
parts: [{ text: "Hello" }],
},
finishReason: "STOP",
},
],
usageMetadata: {
promptTokenCount: 1,
candidatesTokenCount: 1,
totalTokenCount: 2,
},
},
]
const request = waitRequest(pathSuffix, createEventResponse(chunks))
await using tmp = await tmpdir({
init: async (dir) => {
await Bun.write(
path.join(dir, "opencode.json"),
JSON.stringify({
$schema: "https://opencode.ai/config.json",
enabled_providers: [providerID],
provider: {
[providerID]: {
options: {
apiKey: "test-google-key",
baseURL: `${server.url.origin}/v1beta`,
},
},
},
}),
)
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
const sessionID = SessionID.make("session-test-4")
const agent = {
name: "test",
mode: "primary",
options: {},
permission: [{ permission: "*", pattern: "*", action: "allow" }],
temperature: 0.3,
topP: 0.8,
} satisfies Agent.Info
const user = {
id: MessageID.make("user-4"),
sessionID,
role: "user",
time: { created: Date.now() },
agent: agent.name,
model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
} satisfies MessageV2.User
const stream = await LLM.stream({
user,
sessionID,
model: resolved,
agent,
system: ["You are a helpful assistant."],
abort: new AbortController().signal,
messages: [{ role: "user", content: "Hello" }],
tools: {},
})
for await (const _ of stream.fullStream) {
}
const capture = await request
const body = capture.body
const config = body.generationConfig as
| { temperature?: number; topP?: number; maxOutputTokens?: number }
| undefined
expect(capture.url.pathname).toBe(pathSuffix)
expect(config?.temperature).toBe(0.3)
expect(config?.topP).toBe(0.8)
expect(config?.maxOutputTokens).toBe(ProviderTransform.maxOutputTokens(resolved))
},
})
})
})

View File

@@ -0,0 +1,930 @@
import { describe, expect, test } from "bun:test"
import { APICallError } from "ai"
import { MessageV2 } from "../../src/session/message-v2"
import type { Provider } from "../../src/provider/provider"
import { ModelID, ProviderID } from "../../src/provider/schema"
import { SessionID, MessageID, PartID } from "../../src/session/schema"
import { Question } from "../../src/question"
// Shared fixtures: every test in this file reuses one session/provider identity.
const sessionID = SessionID.make("session")
const providerID = ProviderID.make("test")
// Minimal active model record. Limits and costs are zeroed because only
// message conversion (not accounting) is exercised by these tests.
const model: Provider.Model = {
  id: ModelID.make("test-model"),
  providerID,
  api: {
    id: "test-model",
    url: "https://example.com",
    npm: "@ai-sdk/openai",
  },
  name: "Test Model",
  capabilities: {
    temperature: true,
    reasoning: false,
    attachment: false,
    toolcall: true,
    input: {
      text: true,
      audio: false,
      image: false,
      video: false,
      pdf: false,
    },
    output: {
      text: true,
      audio: false,
      image: false,
      video: false,
      pdf: false,
    },
    interleaved: false,
  },
  cost: {
    input: 0,
    output: 0,
    cache: {
      read: 0,
      write: 0,
    },
  },
  limit: {
    context: 0,
    input: 0,
    output: 0,
  },
  status: "active",
  options: {},
  headers: {},
  release_date: "2026-01-01",
}
// Builds a minimal user-message info record bound to the shared test session.
// Only fields read by the conversion code are populated; the double cast
// deliberately bypasses full schema validation for test brevity.
function userInfo(id: string): MessageV2.User {
  const info = {
    id,
    sessionID,
    role: "user",
    time: { created: 0 },
    agent: "user",
    model: { providerID, modelID: ModelID.make("test") },
    tools: {},
    mode: "",
  }
  return info as unknown as MessageV2.User
}
// Builds a minimal assistant-message info record linked to `parentID`.
// `meta` overrides the provider/model identity; otherwise the shared test
// model is used. `error` is stored verbatim when provided.
function assistantInfo(
  id: string,
  parentID: string,
  error?: MessageV2.Assistant["error"],
  meta?: { providerID: string; modelID: string },
): MessageV2.Assistant {
  const identity = meta ? meta : { providerID: model.providerID, modelID: model.api.id }
  const info = {
    id,
    sessionID,
    role: "assistant",
    time: { created: 0 },
    error,
    parentID,
    modelID: identity.modelID,
    providerID: identity.providerID,
    mode: "",
    agent: "agent",
    path: { cwd: "/", root: "/" },
    cost: 0,
    tokens: {
      input: 0,
      output: 0,
      reasoning: 0,
      cache: { read: 0, write: 0 },
    },
  }
  return info as unknown as MessageV2.Assistant
}
// Identity fields shared by every synthetic message part built in these tests.
function basePart(messageID: string, id: string) {
  return { id: PartID.make(id), sessionID, messageID: MessageID.make(messageID) }
}
// Tests for MessageV2.toModelMessages: conversion of stored message/part
// records into the provider-facing model message array.
describe("session.message-v2.toModelMessage", () => {
  // Messages with an empty parts array are dropped entirely.
  test("filters out messages with no parts", () => {
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo("m-empty"),
        parts: [],
      },
      {
        info: userInfo("m-user"),
        parts: [
          {
            ...basePart("m-user", "p1"),
            type: "text",
            text: "hello",
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "hello" }],
      },
    ])
  })
  // A message whose only parts are flagged `ignored` contributes nothing.
  test("filters out messages with only ignored parts", () => {
    const messageID = "m-user"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(messageID),
        parts: [
          {
            ...basePart(messageID, "p1"),
            type: "text",
            text: "ignored",
            ignored: true,
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
  })
  // Synthetic text parts (injected by the system, not typed by the user) are kept.
  test("includes synthetic text parts", () => {
    const messageID = "m-user"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(messageID),
        parts: [
          {
            ...basePart(messageID, "p1"),
            type: "text",
            text: "hello",
            synthetic: true,
          },
        ] as MessageV2.Part[],
      },
      {
        info: assistantInfo("m-assistant", messageID),
        parts: [
          {
            ...basePart("m-assistant", "a1"),
            type: "text",
            text: "assistant",
            synthetic: true,
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "hello" }],
      },
      {
        role: "assistant",
        content: [{ type: "text", text: "assistant" }],
      },
    ])
  })
  // Image files become model file content; non-image/directory files are
  // dropped; compaction/subtask parts inject fixed prompt text instead.
  test("converts user text/file parts and injects compaction/subtask prompts", () => {
    const messageID = "m-user"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(messageID),
        parts: [
          {
            ...basePart(messageID, "p1"),
            type: "text",
            text: "hello",
          },
          {
            ...basePart(messageID, "p2"),
            type: "text",
            text: "ignored",
            ignored: true,
          },
          {
            ...basePart(messageID, "p3"),
            type: "file",
            mime: "image/png",
            filename: "img.png",
            url: "https://example.com/img.png",
          },
          {
            ...basePart(messageID, "p4"),
            type: "file",
            mime: "text/plain",
            filename: "note.txt",
            url: "https://example.com/note.txt",
          },
          {
            ...basePart(messageID, "p5"),
            type: "file",
            mime: "application/x-directory",
            filename: "dir",
            url: "https://example.com/dir",
          },
          {
            ...basePart(messageID, "p6"),
            type: "compaction",
            auto: true,
          },
          {
            ...basePart(messageID, "p7"),
            type: "subtask",
            prompt: "prompt",
            description: "desc",
            agent: "agent",
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [
          { type: "text", text: "hello" },
          {
            type: "file",
            mediaType: "image/png",
            filename: "img.png",
            data: "https://example.com/img.png",
          },
          { type: "text", text: "What did we do so far?" },
          { type: "text", text: "The following tool was executed by the user" },
        ],
      },
    ])
  })
  // Completed tool parts expand into a tool-call entry on the assistant
  // message plus a tool-result message; attachments become media content and
  // provider metadata is forwarded because the assistant model matches.
  test("converts assistant tool completion into tool-call + tool-result messages with attachments", () => {
    const userID = "m-user"
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(userID),
        parts: [
          {
            ...basePart(userID, "u1"),
            type: "text",
            text: "run tool",
          },
        ] as MessageV2.Part[],
      },
      {
        info: assistantInfo(assistantID, userID),
        parts: [
          {
            ...basePart(assistantID, "a1"),
            type: "text",
            text: "done",
            metadata: { openai: { assistant: "meta" } },
          },
          {
            ...basePart(assistantID, "a2"),
            type: "tool",
            callID: "call-1",
            tool: "bash",
            state: {
              status: "completed",
              input: { cmd: "ls" },
              output: "ok",
              title: "Bash",
              metadata: {},
              time: { start: 0, end: 1 },
              attachments: [
                {
                  ...basePart(assistantID, "file-1"),
                  type: "file",
                  mime: "image/png",
                  filename: "attachment.png",
                  url: "data:image/png;base64,Zm9v",
                },
              ],
            },
            metadata: { openai: { tool: "meta" } },
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],
      },
      {
        role: "assistant",
        content: [
          { type: "text", text: "done", providerOptions: { openai: { assistant: "meta" } } },
          {
            type: "tool-call",
            toolCallId: "call-1",
            toolName: "bash",
            input: { cmd: "ls" },
            providerExecuted: undefined,
            providerOptions: { openai: { tool: "meta" } },
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call-1",
            toolName: "bash",
            output: {
              type: "content",
              value: [
                { type: "text", text: "ok" },
                { type: "media", mediaType: "image/png", data: "Zm9v" },
              ],
            },
            providerOptions: { openai: { tool: "meta" } },
          },
        ],
      },
    ])
  })
  // Provider metadata is stripped when the assistant message was produced by
  // a different provider/model than the one now in use.
  test("omits provider metadata when assistant model differs", () => {
    const userID = "m-user"
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(userID),
        parts: [
          {
            ...basePart(userID, "u1"),
            type: "text",
            text: "run tool",
          },
        ] as MessageV2.Part[],
      },
      {
        info: assistantInfo(assistantID, userID, undefined, { providerID: "other", modelID: "other" }),
        parts: [
          {
            ...basePart(assistantID, "a1"),
            type: "text",
            text: "done",
            metadata: { openai: { assistant: "meta" } },
          },
          {
            ...basePart(assistantID, "a2"),
            type: "tool",
            callID: "call-1",
            tool: "bash",
            state: {
              status: "completed",
              input: { cmd: "ls" },
              output: "ok",
              title: "Bash",
              metadata: {},
              time: { start: 0, end: 1 },
            },
            metadata: { openai: { tool: "meta" } },
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],
      },
      {
        role: "assistant",
        content: [
          { type: "text", text: "done" },
          {
            type: "tool-call",
            toolCallId: "call-1",
            toolName: "bash",
            input: { cmd: "ls" },
            providerExecuted: undefined,
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call-1",
            toolName: "bash",
            output: { type: "text", value: "ok" },
          },
        ],
      },
    ])
  })
  // Tool results compacted away (time.compacted set) are replaced by a
  // fixed placeholder so the transcript stays small.
  test("replaces compacted tool output with placeholder", () => {
    const userID = "m-user"
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(userID),
        parts: [
          {
            ...basePart(userID, "u1"),
            type: "text",
            text: "run tool",
          },
        ] as MessageV2.Part[],
      },
      {
        info: assistantInfo(assistantID, userID),
        parts: [
          {
            ...basePart(assistantID, "a1"),
            type: "tool",
            callID: "call-1",
            tool: "bash",
            state: {
              status: "completed",
              input: { cmd: "ls" },
              output: "this should be cleared",
              title: "Bash",
              metadata: {},
              time: { start: 0, end: 1, compacted: 1 },
            },
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],
      },
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            toolCallId: "call-1",
            toolName: "bash",
            input: { cmd: "ls" },
            providerExecuted: undefined,
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call-1",
            toolName: "bash",
            output: { type: "text", value: "[Old tool result content cleared]" },
          },
        ],
      },
    ])
  })
  // Errored tool parts still emit the tool-call, with an error-text result.
  test("converts assistant tool error into error-text tool result", () => {
    const userID = "m-user"
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(userID),
        parts: [
          {
            ...basePart(userID, "u1"),
            type: "text",
            text: "run tool",
          },
        ] as MessageV2.Part[],
      },
      {
        info: assistantInfo(assistantID, userID),
        parts: [
          {
            ...basePart(assistantID, "a1"),
            type: "tool",
            callID: "call-1",
            tool: "bash",
            state: {
              status: "error",
              input: { cmd: "ls" },
              error: "nope",
              time: { start: 0, end: 1 },
              metadata: {},
            },
            metadata: { openai: { tool: "meta" } },
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],
      },
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            toolCallId: "call-1",
            toolName: "bash",
            input: { cmd: "ls" },
            providerExecuted: undefined,
            providerOptions: { openai: { tool: "meta" } },
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call-1",
            toolName: "bash",
            output: { type: "error-text", value: "nope" },
            providerOptions: { openai: { tool: "meta" } },
          },
        ],
      },
    ])
  })
  // Assistant messages carrying a non-abort error are excluded entirely.
  test("filters assistant messages with non-abort errors", () => {
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: assistantInfo(
          assistantID,
          "m-parent",
          new MessageV2.APIError({ message: "boom", isRetryable: true }).toObject() as MessageV2.APIError,
        ),
        parts: [
          {
            ...basePart(assistantID, "a1"),
            type: "text",
            text: "should not render",
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
  })
  // Aborted assistants are kept only when they contain content other than
  // step-start/reasoning parts (i.e. a partial answer worth preserving).
  test("includes aborted assistant messages only when they have non-step-start/reasoning content", () => {
    const assistantID1 = "m-assistant-1"
    const assistantID2 = "m-assistant-2"
    const aborted = new MessageV2.AbortedError({ message: "aborted" }).toObject() as MessageV2.Assistant["error"]
    const input: MessageV2.WithParts[] = [
      {
        info: assistantInfo(assistantID1, "m-parent", aborted),
        parts: [
          {
            ...basePart(assistantID1, "a1"),
            type: "reasoning",
            text: "thinking",
            time: { start: 0 },
          },
          {
            ...basePart(assistantID1, "a2"),
            type: "text",
            text: "partial answer",
          },
        ] as MessageV2.Part[],
      },
      {
        info: assistantInfo(assistantID2, "m-parent", aborted),
        parts: [
          {
            ...basePart(assistantID2, "b1"),
            type: "step-start",
          },
          {
            ...basePart(assistantID2, "b2"),
            type: "reasoning",
            text: "thinking",
            time: { start: 0 },
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "thinking", providerOptions: undefined },
          { type: "text", text: "partial answer" },
        ],
      },
    ])
  })
  // step-start parts act as boundaries producing separate assistant messages.
  test("splits assistant messages on step-start boundaries", () => {
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: assistantInfo(assistantID, "m-parent"),
        parts: [
          {
            ...basePart(assistantID, "p1"),
            type: "text",
            text: "first",
          },
          {
            ...basePart(assistantID, "p2"),
            type: "step-start",
          },
          {
            ...basePart(assistantID, "p3"),
            type: "text",
            text: "second",
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "assistant",
        content: [{ type: "text", text: "first" }],
      },
      {
        role: "assistant",
        content: [{ type: "text", text: "second" }],
      },
    ])
  })
  // A message containing only step-start parts produces no output.
  test("drops messages that only contain step-start parts", () => {
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: assistantInfo(assistantID, "m-parent"),
        parts: [
          {
            ...basePart(assistantID, "p1"),
            type: "step-start",
          },
        ] as MessageV2.Part[],
      },
    ]
    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
  })
  // Unfinished (pending/running) tool calls get synthetic interrupted-error
  // results so providers never see a tool_use without a matching result.
  test("converts pending/running tool calls to error results to prevent dangling tool_use", () => {
    const userID = "m-user"
    const assistantID = "m-assistant"
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo(userID),
        parts: [
          {
            ...basePart(userID, "u1"),
            type: "text",
            text: "run tool",
          },
        ] as MessageV2.Part[],
      },
      {
        info: assistantInfo(assistantID, userID),
        parts: [
          {
            ...basePart(assistantID, "a1"),
            type: "tool",
            callID: "call-pending",
            tool: "bash",
            state: {
              status: "pending",
              input: { cmd: "ls" },
              raw: "",
            },
          },
          {
            ...basePart(assistantID, "a2"),
            type: "tool",
            callID: "call-running",
            tool: "read",
            state: {
              status: "running",
              input: { path: "/tmp" },
              time: { start: 0 },
            },
          },
        ] as MessageV2.Part[],
      },
    ]
    const result = MessageV2.toModelMessages(input, model)
    expect(result).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],
      },
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            toolCallId: "call-pending",
            toolName: "bash",
            input: { cmd: "ls" },
            providerExecuted: undefined,
          },
          {
            type: "tool-call",
            toolCallId: "call-running",
            toolName: "read",
            input: { path: "/tmp" },
            providerExecuted: undefined,
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call-pending",
            toolName: "bash",
            output: { type: "error-text", value: "[Tool execution was interrupted]" },
          },
          {
            type: "tool-result",
            toolCallId: "call-running",
            toolName: "read",
            output: { type: "error-text", value: "[Tool execution was interrupted]" },
          },
        ],
      },
    ])
  })
})
// Tests for MessageV2.fromError: normalizing provider/SDK errors into the
// serialized error shapes stored on assistant messages.
describe("session.message-v2.fromError", () => {
  // context_length_exceeded error codes map to ContextOverflowError.
  test("serializes context_length_exceeded as ContextOverflowError", () => {
    const input = {
      type: "error",
      error: {
        code: "context_length_exceeded",
      },
    }
    const result = MessageV2.fromError(input, { providerID })
    expect(result).toStrictEqual({
      name: "ContextOverflowError",
      data: {
        message: "Input exceeds context window of this model",
        responseBody: JSON.stringify(input),
      },
    })
  })
  // Known response error codes are mapped to friendly, non-retryable APIErrors.
  test("serializes response error codes", () => {
    const cases = [
      {
        code: "insufficient_quota",
        message: "Quota exceeded. Check your plan and billing details.",
      },
      {
        code: "usage_not_included",
        message: "To use Codex with your ChatGPT plan, upgrade to Plus: https://chatgpt.com/explore/plus.",
      },
      {
        code: "invalid_prompt",
        message: "Invalid prompt from test",
      },
    ]
    cases.forEach((item) => {
      const input = {
        type: "error",
        error: {
          code: item.code,
          // invalid_prompt passes the provider's message through verbatim.
          message: item.code === "invalid_prompt" ? item.message : undefined,
        },
      }
      const result = MessageV2.fromError(input, { providerID })
      expect(result).toStrictEqual({
        name: "APIError",
        data: {
          message: item.message,
          isRetryable: false,
          responseBody: JSON.stringify(input),
        },
      })
    })
  })
  // Provider-specific overflow phrasings in APICallError messages are detected.
  test("detects context overflow from APICallError provider messages", () => {
    const cases = [
      "prompt is too long: 213462 tokens > 200000 maximum",
      "Your input exceeds the context window of this model",
      "The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)",
      "Please reduce the length of the messages or completion",
      "400 status code (no body)",
      "413 status code (no body)",
    ]
    cases.forEach((message) => {
      const error = new APICallError({
        message,
        url: "https://example.com",
        requestBodyValues: {},
        statusCode: 400,
        responseHeaders: { "content-type": "application/json" },
        isRetryable: false,
      })
      const result = MessageV2.fromError(error, { providerID })
      expect(MessageV2.ContextOverflowError.isInstance(result)).toBe(true)
    })
  })
  // Overflow is also detected from the structured code in the response body.
  test("detects context overflow from context_length_exceeded code in response body", () => {
    const error = new APICallError({
      message: "Request failed",
      url: "https://example.com",
      requestBodyValues: {},
      statusCode: 422,
      responseHeaders: { "content-type": "application/json" },
      responseBody: JSON.stringify({
        error: {
          message: "Some message",
          type: "invalid_request_error",
          code: "context_length_exceeded",
        },
      }),
      isRetryable: false,
    })
    const result = MessageV2.fromError(error, { providerID })
    expect(MessageV2.ContextOverflowError.isInstance(result)).toBe(true)
  })
  // Rate limiting (429, no body) must stay an APIError, not overflow.
  test("does not classify 429 no body as context overflow", () => {
    const result = MessageV2.fromError(
      new APICallError({
        message: "429 status code (no body)",
        url: "https://example.com",
        requestBodyValues: {},
        statusCode: 429,
        responseHeaders: { "content-type": "application/json" },
        isRetryable: false,
      }),
      { providerID },
    )
    expect(MessageV2.ContextOverflowError.isInstance(result)).toBe(false)
    expect(MessageV2.APIError.isInstance(result)).toBe(true)
  })
  // Non-error inputs fall back to UnknownError with a stringified message.
  test("serializes unknown inputs", () => {
    const result = MessageV2.fromError(123, { providerID })
    expect(result).toStrictEqual({
      name: "UnknownError",
      data: {
        message: "123",
      },
    })
  })
  // Tagged project errors surface their own message under UnknownError.
  test("serializes tagged errors with their message", () => {
    const result = MessageV2.fromError(new Question.RejectedError(), { providerID })
    expect(result).toStrictEqual({
      name: "UnknownError",
      data: {
        message: "The user dismissed this question",
      },
    })
  })
})

View File

@@ -0,0 +1,115 @@
import { describe, expect, test } from "bun:test"
import path from "path"
import { Instance } from "../../src/project/instance"
import { Session } from "../../src/session"
import { MessageV2 } from "../../src/session/message-v2"
import { MessageID, PartID, type SessionID } from "../../src/session/schema"
import { Log } from "../../src/util/log"
// Package root; tests run with it as the active project directory.
const root = path.join(__dirname, "../..")
// Silence log output while tests run.
Log.init({ print: false })
// Seeds `count` user messages (each with one text part) into the session and
// returns their ids in creation order. `time` supplies each message's created
// timestamp; the default of now + index keeps ordering deterministic.
async function fill(sessionID: SessionID, count: number, time = (i: number) => Date.now() + i) {
  const ids: MessageID[] = []
  let index = 0
  while (index < count) {
    const id = MessageID.ascending()
    ids.push(id)
    await Session.updateMessage({
      id,
      sessionID,
      role: "user",
      time: { created: time(index) },
      agent: "test",
      model: { providerID: "test", modelID: "test" },
      tools: {},
      mode: "",
    } as unknown as MessageV2.Info)
    await Session.updatePart({
      id: PartID.ascending(),
      sessionID,
      messageID: id,
      type: "text",
      text: `m${index}`,
    })
    index += 1
  }
  return ids
}
// Cursor-based pagination over stored session messages (newest first).
describe("session message pagination", () => {
  // Walks backward through 6 messages two at a time via opaque cursors.
  test("pages backward with opaque cursors", async () => {
    await Instance.provide({
      directory: root,
      fn: async () => {
        const session = await Session.create({})
        const ids = await fill(session.id, 6)
        const a = await MessageV2.page({ sessionID: session.id, limit: 2 })
        expect(a.items.map((item) => item.info.id)).toEqual(ids.slice(-2))
        expect(a.items.every((item) => item.parts.length === 1)).toBe(true)
        expect(a.more).toBe(true)
        expect(a.cursor).toBeTruthy()
        const b = await MessageV2.page({ sessionID: session.id, limit: 2, before: a.cursor! })
        expect(b.items.map((item) => item.info.id)).toEqual(ids.slice(-4, -2))
        expect(b.more).toBe(true)
        expect(b.cursor).toBeTruthy()
        const c = await MessageV2.page({ sessionID: session.id, limit: 2, before: b.cursor! })
        expect(c.items.map((item) => item.info.id)).toEqual(ids.slice(0, 2))
        // Final page: no more items and no cursor to continue from.
        expect(c.more).toBe(false)
        expect(c.cursor).toBeUndefined()
        await Session.remove(session.id)
      },
    })
  })
  // stream() yields messages newest-first regardless of insertion order.
  test("keeps stream order newest first", async () => {
    await Instance.provide({
      directory: root,
      fn: async () => {
        const session = await Session.create({})
        const ids = await fill(session.id, 5)
        const items = await Array.fromAsync(MessageV2.stream(session.id))
        expect(items.map((item) => item.info.id)).toEqual(ids.slice().reverse())
        await Session.remove(session.id)
      },
    })
  })
  // Non-integer created timestamps must not break cursor round-tripping.
  test("accepts cursors generated from fractional timestamps", async () => {
    await Instance.provide({
      directory: root,
      fn: async () => {
        const session = await Session.create({})
        const ids = await fill(session.id, 4, (i) => 1000.5 + i)
        const a = await MessageV2.page({ sessionID: session.id, limit: 2 })
        const b = await MessageV2.page({ sessionID: session.id, limit: 2, before: a.cursor! })
        expect(a.items.map((item) => item.info.id)).toEqual(ids.slice(-2))
        expect(b.items.map((item) => item.info.id)).toEqual(ids.slice(0, 2))
        await Session.remove(session.id)
      },
    })
  })
  // A message id from one session must not resolve under another session.
  test("scopes get by session id", async () => {
    await Instance.provide({
      directory: root,
      fn: async () => {
        const a = await Session.create({})
        const b = await Session.create({})
        const [id] = await fill(a.id, 1)
        await expect(MessageV2.get({ sessionID: b.id, messageID: id })).rejects.toMatchObject({ name: "NotFoundError" })
        await Session.remove(a.id)
        await Session.remove(b.id)
      },
    })
  })
})

View File

@@ -0,0 +1,212 @@
import path from "path"
import { describe, expect, test } from "bun:test"
import { fileURLToPath } from "url"
import { Instance } from "../../src/project/instance"
import { ModelID, ProviderID } from "../../src/provider/schema"
import { Session } from "../../src/session"
import { MessageV2 } from "../../src/session/message-v2"
import { SessionPrompt } from "../../src/session/prompt"
import { Log } from "../../src/util/log"
import { tmpdir } from "../fixture/fixture"
// Silence log output while tests run.
Log.init({ print: false })
// Prompts referencing files that do not exist must degrade gracefully.
describe("session.prompt missing file", () => {
  // A missing file part becomes a synthetic failure note, not a thrown error.
  test("does not fail the prompt when a file part is missing", async () => {
    await using tmp = await tmpdir({
      git: true,
      config: {
        agent: {
          build: {
            model: "openai/gpt-5.2",
          },
        },
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const session = await Session.create({})
        const missing = path.join(tmp.path, "does-not-exist.ts")
        const msg = await SessionPrompt.prompt({
          sessionID: session.id,
          agent: "build",
          noReply: true,
          parts: [
            { type: "text", text: "please review @does-not-exist.ts" },
            {
              type: "file",
              mime: "text/plain",
              url: `file://${missing}`,
              filename: "does-not-exist.ts",
            },
          ],
        })
        if (msg.info.role !== "user") throw new Error("expected user message")
        // A synthetic text part should report the failed read.
        const hasFailure = msg.parts.some(
          (part) => part.type === "text" && part.synthetic && part.text.includes("Read tool failed to read"),
        )
        expect(hasFailure).toBe(true)
        await Session.remove(session.id)
      },
    })
  })
  // Stored part order must match the submitted order even though file
  // resolution happens asynchronously.
  test("keeps stored part order stable when file resolution is async", async () => {
    await using tmp = await tmpdir({
      git: true,
      config: {
        agent: {
          build: {
            model: "openai/gpt-5.2",
          },
        },
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const session = await Session.create({})
        const missing = path.join(tmp.path, "still-missing.ts")
        const msg = await SessionPrompt.prompt({
          sessionID: session.id,
          agent: "build",
          noReply: true,
          parts: [
            {
              type: "file",
              mime: "text/plain",
              url: `file://${missing}`,
              filename: "still-missing.ts",
            },
            { type: "text", text: "after-file" },
          ],
        })
        if (msg.info.role !== "user") throw new Error("expected user message")
        const stored = await MessageV2.get({
          sessionID: session.id,
          messageID: msg.info.id,
        })
        // Expected order: Read invocation note, failure note, then user text.
        const text = stored.parts.filter((part) => part.type === "text").map((part) => part.text)
        expect(text[0]?.startsWith("Called the Read tool with the following input:")).toBe(true)
        expect(text[1]?.includes("Read tool failed to read")).toBe(true)
        expect(text[2]).toBe("after-file")
        await Session.remove(session.id)
      },
    })
  })
})
// @-mention resolution must survive filenames containing URL-special characters.
describe("session.prompt special characters", () => {
  // '#' must be percent-encoded in the file URL and decode back to the path.
  test("handles filenames with # character", async () => {
    await using tmp = await tmpdir({
      git: true,
      init: async (dir) => {
        await Bun.write(path.join(dir, "file#name.txt"), "special content\n")
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const session = await Session.create({})
        const template = "Read @file#name.txt"
        const parts = await SessionPrompt.resolvePromptParts(template)
        const fileParts = parts.filter((part) => part.type === "file")
        expect(fileParts.length).toBe(1)
        expect(fileParts[0].filename).toBe("file#name.txt")
        // '#' must be percent-encoded, otherwise it is parsed as a fragment.
        expect(fileParts[0].url).toContain("%23")
        const decodedPath = fileURLToPath(fileParts[0].url)
        expect(decodedPath).toBe(path.join(tmp.path, "file#name.txt"))
        const message = await SessionPrompt.prompt({
          sessionID: session.id,
          parts,
          noReply: true,
        })
        const stored = await MessageV2.get({ sessionID: session.id, messageID: message.info.id })
        const textParts = stored.parts.filter((part) => part.type === "text")
        // The file's content must have been inlined into the stored message.
        const hasContent = textParts.some((part) => part.text.includes("special content"))
        expect(hasContent).toBe(true)
        await Session.remove(session.id)
      },
    })
  })
})
// The agent's configured model variant applies only when the prompt actually
// uses the agent's model, and an explicit variant overrides it.
describe("session.prompt agent variant", () => {
  test("applies agent variant only when using agent model", async () => {
    // Provide a fake API key for the duration of the test, restoring after.
    const prev = process.env.OPENAI_API_KEY
    process.env.OPENAI_API_KEY = "test-openai-key"
    try {
      await using tmp = await tmpdir({
        git: true,
        config: {
          agent: {
            build: {
              model: "openai/gpt-5.2",
              variant: "xhigh",
            },
          },
        },
      })
      await Instance.provide({
        directory: tmp.path,
        fn: async () => {
          const session = await Session.create({})
          // Different model than the agent's: variant must not apply.
          const other = await SessionPrompt.prompt({
            sessionID: session.id,
            agent: "build",
            model: { providerID: ProviderID.make("opencode"), modelID: ModelID.make("kimi-k2.5-free") },
            noReply: true,
            parts: [{ type: "text", text: "hello" }],
          })
          if (other.info.role !== "user") throw new Error("expected user message")
          expect(other.info.variant).toBeUndefined()
          // No explicit model: agent model and its variant are used.
          const match = await SessionPrompt.prompt({
            sessionID: session.id,
            agent: "build",
            noReply: true,
            parts: [{ type: "text", text: "hello again" }],
          })
          if (match.info.role !== "user") throw new Error("expected user message")
          expect(match.info.model).toEqual({ providerID: ProviderID.make("openai"), modelID: ModelID.make("gpt-5.2") })
          expect(match.info.variant).toBe("xhigh")
          // An explicit variant on the prompt wins over the agent's setting.
          const override = await SessionPrompt.prompt({
            sessionID: session.id,
            agent: "build",
            noReply: true,
            variant: "high",
            parts: [{ type: "text", text: "hello third" }],
          })
          if (override.info.role !== "user") throw new Error("expected user message")
          expect(override.info.variant).toBe("high")
          await Session.remove(session.id)
        },
      })
    } finally {
      if (prev === undefined) delete process.env.OPENAI_API_KEY
      else process.env.OPENAI_API_KEY = prev
    }
  })
})

View File

@@ -0,0 +1,192 @@
import { describe, expect, test } from "bun:test"
import type { NamedError } from "@opencode-ai/util/error"
import { APICallError } from "ai"
import { setTimeout as sleep } from "node:timers/promises"
import { SessionRetry } from "../../src/session/retry"
import { MessageV2 } from "../../src/session/message-v2"
import { ProviderID } from "../../src/provider/schema"
// Provider id shared by every retry test in this file.
const providerID = ProviderID.make("test")
// Builds a retryable APIError in its serialized (toObject) form, optionally
// carrying response headers so retry-after hints can be exercised.
function apiError(headers?: Record<string, string>): MessageV2.APIError {
  const error = new MessageV2.APIError({
    message: "boom",
    isRetryable: true,
    responseHeaders: headers,
  })
  return error.toObject() as MessageV2.APIError
}
// Wraps an arbitrary message in the serialized-error shape SessionRetry inspects.
function wrap(message: unknown): ReturnType<NamedError["toObject"]> {
  const shaped = { data: { message } }
  return shaped as ReturnType<NamedError["toObject"]>
}
// Exercises SessionRetry.delay's exponential backoff and retry-after handling.
describe("session.retry.delay", () => {
  // Without headers: exponential 2s/4s/8s/16s, capped at 30s.
  test("caps delay at 30 seconds when headers missing", () => {
    const error = apiError()
    const delays = Array.from({ length: 10 }, (_, index) => SessionRetry.delay(index + 1, error))
    expect(delays).toStrictEqual([2000, 4000, 8000, 16000, 30000, 30000, 30000, 30000, 30000, 30000])
  })
  test("prefers retry-after-ms when shorter than exponential", () => {
    const error = apiError({ "retry-after-ms": "1500" })
    expect(SessionRetry.delay(4, error)).toBe(1500)
  })
  test("uses retry-after seconds when reasonable", () => {
    const error = apiError({ "retry-after": "30" })
    expect(SessionRetry.delay(3, error)).toBe(30000)
  })
  // retry-after may also be an HTTP date; allow ~1s of test execution slack.
  test("accepts http-date retry-after values", () => {
    const date = new Date(Date.now() + 20000).toUTCString()
    const error = apiError({ "retry-after": date })
    const d = SessionRetry.delay(1, error)
    expect(d).toBeGreaterThanOrEqual(19000)
    expect(d).toBeLessThanOrEqual(20000)
  })
  test("ignores invalid retry hints", () => {
    const error = apiError({ "retry-after": "not-a-number" })
    expect(SessionRetry.delay(1, error)).toBe(2000)
  })
  test("ignores malformed date retry hints", () => {
    const error = apiError({ "retry-after": "Invalid Date String" })
    expect(SessionRetry.delay(1, error)).toBe(2000)
  })
  test("ignores past date retry hints", () => {
    const pastDate = new Date(Date.now() - 5000).toUTCString()
    const error = apiError({ "retry-after": pastDate })
    expect(SessionRetry.delay(1, error)).toBe(2000)
  })
  test("uses retry-after values even when exceeding 10 minutes with headers", () => {
    const error = apiError({ "retry-after": "50" })
    expect(SessionRetry.delay(1, error)).toBe(50000)
    const longError = apiError({ "retry-after-ms": "700000" })
    expect(SessionRetry.delay(1, longError)).toBe(700000)
  })
  test("sleep caps delay to max 32-bit signed integer to avoid TimeoutOverflowWarning", async () => {
    const controller = new AbortController()
    const warnings: string[] = []
    const originalWarn = process.emitWarning
    // Record both the warning's name and its message: Node reports the
    // overflow as a warning *named* TimeoutOverflowWarning (the message only
    // describes the clamped duration), so the name must be captured for the
    // assertion below to be able to fail.
    process.emitWarning = (warning: string | Error) => {
      warnings.push(typeof warning === "string" ? warning : `${warning.name}: ${warning.message}`)
    }
    try {
      const promise = SessionRetry.sleep(2_560_914_000, controller.signal)
      controller.abort()
      try {
        await promise
      } catch {}
    } finally {
      // Restore the hook even if sleep throws synchronously.
      process.emitWarning = originalWarn
    }
    expect(warnings.some((w) => w.includes("TimeoutOverflowWarning"))).toBe(false)
  })
})
describe("session.retry.retryable", () => {
test("maps too_many_requests json messages", () => {
const error = wrap(JSON.stringify({ type: "error", error: { type: "too_many_requests" } }))
expect(SessionRetry.retryable(error)).toBe("Too Many Requests")
})
test("maps overloaded provider codes", () => {
const error = wrap(JSON.stringify({ code: "resource_exhausted" }))
expect(SessionRetry.retryable(error)).toBe("Provider is overloaded")
})
test("handles json messages without code", () => {
const error = wrap(JSON.stringify({ error: { message: "no_kv_space" } }))
expect(SessionRetry.retryable(error)).toBe(`{"error":{"message":"no_kv_space"}}`)
})
test("does not throw on numeric error codes", () => {
const error = wrap(JSON.stringify({ type: "error", error: { code: 123 } }))
const result = SessionRetry.retryable(error)
expect(result).toBeUndefined()
})
test("returns undefined for non-json message", () => {
const error = wrap("not-json")
expect(SessionRetry.retryable(error)).toBeUndefined()
})
test("does not retry context overflow errors", () => {
const error = new MessageV2.ContextOverflowError({
message: "Input exceeds context window of this model",
responseBody: '{"error":{"code":"context_length_exceeded"}}',
}).toObject() as ReturnType<NamedError["toObject"]>
expect(SessionRetry.retryable(error)).toBeUndefined()
})
})
describe("session.message-v2.fromError", () => {
test.concurrent(
"converts ECONNRESET socket errors to retryable APIError",
async () => {
using server = Bun.serve({
port: 0,
idleTimeout: 8,
async fetch(req) {
return new Response(
new ReadableStream({
async pull(controller) {
controller.enqueue("Hello,")
await sleep(10000)
controller.enqueue(" World!")
controller.close()
},
}),
{ headers: { "Content-Type": "text/plain" } },
)
},
})
const error = await fetch(new URL("/", server.url.origin))
.then((res) => res.text())
.catch((e) => e)
const result = MessageV2.fromError(error, { providerID })
expect(MessageV2.APIError.isInstance(result)).toBe(true)
expect((result as MessageV2.APIError).data.isRetryable).toBe(true)
expect((result as MessageV2.APIError).data.message).toBe("Connection reset by server")
expect((result as MessageV2.APIError).data.metadata?.code).toBe("ECONNRESET")
expect((result as MessageV2.APIError).data.metadata?.message).toInclude("socket connection")
},
15_000,
)
test("ECONNRESET socket error is retryable", () => {
const error = new MessageV2.APIError({
message: "Connection reset by server",
isRetryable: true,
metadata: { code: "ECONNRESET", message: "The socket connection was closed unexpectedly" },
}).toObject() as MessageV2.APIError
const retryable = SessionRetry.retryable(error)
expect(retryable).toBeDefined()
expect(retryable).toBe("Connection reset by server")
})
test("marks OpenAI 404 status codes as retryable", () => {
const error = new APICallError({
message: "boom",
url: "https://api.openai.com/v1/chat/completions",
requestBodyValues: {},
statusCode: 404,
responseHeaders: { "content-type": "application/json" },
responseBody: '{"error":"boom"}',
isRetryable: false,
})
const result = MessageV2.fromError(error, { providerID: ProviderID.make("openai") }) as MessageV2.APIError
expect(result.data.isRetryable).toBe(true)
})
})

View File

@@ -0,0 +1,286 @@
import { describe, expect, test, beforeEach, afterEach } from "bun:test"
import path from "path"
import { Session } from "../../src/session"
import { ModelID, ProviderID } from "../../src/provider/schema"
import { SessionRevert } from "../../src/session/revert"
import { SessionCompaction } from "../../src/session/compaction"
import { MessageV2 } from "../../src/session/message-v2"
import { Log } from "../../src/util/log"
import { Instance } from "../../src/project/instance"
import { MessageID, PartID } from "../../src/session/schema"
import { tmpdir } from "../fixture/fixture"
// NOTE(review): projectRoot appears unused in this file — the tests below run
// inside tmpdir fixtures. Confirm before removing.
const projectRoot = path.join(__dirname, "../..")
Log.init({ print: false })
describe("revert + compact workflow", () => {
test("should properly handle compact command after revert", async () => {
await using tmp = await tmpdir({ git: true })
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Create a session
const session = await Session.create({})
const sessionID = session.id
// Create a user message
const userMsg1 = await Session.updateMessage({
id: MessageID.ascending(),
role: "user",
sessionID,
agent: "default",
model: {
providerID: ProviderID.make("openai"),
modelID: ModelID.make("gpt-4"),
},
time: {
created: Date.now(),
},
})
// Add a text part to the user message
await Session.updatePart({
id: PartID.ascending(),
messageID: userMsg1.id,
sessionID,
type: "text",
text: "Hello, please help me",
})
// Create an assistant response message
const assistantMsg1: MessageV2.Assistant = {
id: MessageID.ascending(),
role: "assistant",
sessionID,
mode: "default",
agent: "default",
path: {
cwd: tmp.path,
root: tmp.path,
},
cost: 0,
tokens: {
output: 0,
input: 0,
reasoning: 0,
cache: { read: 0, write: 0 },
},
modelID: ModelID.make("gpt-4"),
providerID: ProviderID.make("openai"),
parentID: userMsg1.id,
time: {
created: Date.now(),
},
finish: "end_turn",
}
await Session.updateMessage(assistantMsg1)
// Add a text part to the assistant message
await Session.updatePart({
id: PartID.ascending(),
messageID: assistantMsg1.id,
sessionID,
type: "text",
text: "Sure, I'll help you!",
})
// Create another user message
const userMsg2 = await Session.updateMessage({
id: MessageID.ascending(),
role: "user",
sessionID,
agent: "default",
model: {
providerID: ProviderID.make("openai"),
modelID: ModelID.make("gpt-4"),
},
time: {
created: Date.now(),
},
})
await Session.updatePart({
id: PartID.ascending(),
messageID: userMsg2.id,
sessionID,
type: "text",
text: "What's the capital of France?",
})
// Create another assistant response
const assistantMsg2: MessageV2.Assistant = {
id: MessageID.ascending(),
role: "assistant",
sessionID,
mode: "default",
agent: "default",
path: {
cwd: tmp.path,
root: tmp.path,
},
cost: 0,
tokens: {
output: 0,
input: 0,
reasoning: 0,
cache: { read: 0, write: 0 },
},
modelID: ModelID.make("gpt-4"),
providerID: ProviderID.make("openai"),
parentID: userMsg2.id,
time: {
created: Date.now(),
},
finish: "end_turn",
}
await Session.updateMessage(assistantMsg2)
await Session.updatePart({
id: PartID.ascending(),
messageID: assistantMsg2.id,
sessionID,
type: "text",
text: "The capital of France is Paris.",
})
// Verify messages before revert
let messages = await Session.messages({ sessionID })
expect(messages.length).toBe(4) // 2 user + 2 assistant messages
const messageIds = messages.map((m) => m.info.id)
expect(messageIds).toContain(userMsg1.id)
expect(messageIds).toContain(userMsg2.id)
expect(messageIds).toContain(assistantMsg1.id)
expect(messageIds).toContain(assistantMsg2.id)
// Revert the last user message (userMsg2)
await SessionRevert.revert({
sessionID,
messageID: userMsg2.id,
})
// Check that revert state is set
let sessionInfo = await Session.get(sessionID)
expect(sessionInfo.revert).toBeDefined()
const revertMessageID = sessionInfo.revert?.messageID
expect(revertMessageID).toBeDefined()
// Messages should still be in the list (not removed yet, just marked for revert)
messages = await Session.messages({ sessionID })
expect(messages.length).toBe(4)
// Now clean up the revert state (this is what the compact endpoint should do)
await SessionRevert.cleanup(sessionInfo)
// After cleanup, the reverted messages (those after the revert point) should be removed
messages = await Session.messages({ sessionID })
const remainingIds = messages.map((m) => m.info.id)
// The revert point is somewhere in the message chain, so we should have fewer messages
expect(messages.length).toBeLessThan(4)
// userMsg2 and assistantMsg2 should be removed (they come after the revert point)
expect(remainingIds).not.toContain(userMsg2.id)
expect(remainingIds).not.toContain(assistantMsg2.id)
// Revert state should be cleared
sessionInfo = await Session.get(sessionID)
expect(sessionInfo.revert).toBeUndefined()
// Clean up
await Session.remove(sessionID)
},
})
})
test("should properly clean up revert state before creating compaction message", async () => {
await using tmp = await tmpdir({ git: true })
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Create a session
const session = await Session.create({})
const sessionID = session.id
// Create initial messages
const userMsg = await Session.updateMessage({
id: MessageID.ascending(),
role: "user",
sessionID,
agent: "default",
model: {
providerID: ProviderID.make("openai"),
modelID: ModelID.make("gpt-4"),
},
time: {
created: Date.now(),
},
})
await Session.updatePart({
id: PartID.ascending(),
messageID: userMsg.id,
sessionID,
type: "text",
text: "Hello",
})
const assistantMsg: MessageV2.Assistant = {
id: MessageID.ascending(),
role: "assistant",
sessionID,
mode: "default",
agent: "default",
path: {
cwd: tmp.path,
root: tmp.path,
},
cost: 0,
tokens: {
output: 0,
input: 0,
reasoning: 0,
cache: { read: 0, write: 0 },
},
modelID: ModelID.make("gpt-4"),
providerID: ProviderID.make("openai"),
parentID: userMsg.id,
time: {
created: Date.now(),
},
finish: "end_turn",
}
await Session.updateMessage(assistantMsg)
await Session.updatePart({
id: PartID.ascending(),
messageID: assistantMsg.id,
sessionID,
type: "text",
text: "Hi there!",
})
// Revert the user message
await SessionRevert.revert({
sessionID,
messageID: userMsg.id,
})
// Check that revert state is set
let sessionInfo = await Session.get(sessionID)
expect(sessionInfo.revert).toBeDefined()
// Simulate what the compact endpoint does: cleanup revert before creating compaction
await SessionRevert.cleanup(sessionInfo)
// Verify revert state is cleared
sessionInfo = await Session.get(sessionID)
expect(sessionInfo.revert).toBeUndefined()
// Verify messages are properly cleaned up
const messages = await Session.messages({ sessionID })
expect(messages.length).toBe(0) // All messages should be reverted
// Clean up
await Session.remove(sessionID)
},
})
})
})

View File

@@ -0,0 +1,142 @@
import { describe, expect, test } from "bun:test"
import path from "path"
import { Session } from "../../src/session"
import { Bus } from "../../src/bus"
import { Log } from "../../src/util/log"
import { Instance } from "../../src/project/instance"
import { MessageV2 } from "../../src/session/message-v2"
import { MessageID, PartID } from "../../src/session/schema"
// Repo root — used as the Instance directory for the event tests below.
const projectRoot = path.join(__dirname, "../..")
Log.init({ print: false })
describe("session.started event", () => {
test("should emit session.started event when session is created", async () => {
await Instance.provide({
directory: projectRoot,
fn: async () => {
let eventReceived = false
let receivedInfo: Session.Info | undefined
const unsub = Bus.subscribe(Session.Event.Created, (event) => {
eventReceived = true
receivedInfo = event.properties.info as Session.Info
})
const session = await Session.create({})
await new Promise((resolve) => setTimeout(resolve, 100))
unsub()
expect(eventReceived).toBe(true)
expect(receivedInfo).toBeDefined()
expect(receivedInfo?.id).toBe(session.id)
expect(receivedInfo?.projectID).toBe(session.projectID)
expect(receivedInfo?.directory).toBe(session.directory)
expect(receivedInfo?.title).toBe(session.title)
await Session.remove(session.id)
},
})
})
test("session.started event should be emitted before session.updated", async () => {
await Instance.provide({
directory: projectRoot,
fn: async () => {
const events: string[] = []
const unsubStarted = Bus.subscribe(Session.Event.Created, () => {
events.push("started")
})
const unsubUpdated = Bus.subscribe(Session.Event.Updated, () => {
events.push("updated")
})
const session = await Session.create({})
await new Promise((resolve) => setTimeout(resolve, 100))
unsubStarted()
unsubUpdated()
expect(events).toContain("started")
expect(events).toContain("updated")
expect(events.indexOf("started")).toBeLessThan(events.indexOf("updated"))
await Session.remove(session.id)
},
})
})
})
describe("step-finish token propagation via Bus event", () => {
test(
"non-zero tokens propagate through PartUpdated event",
async () => {
await Instance.provide({
directory: projectRoot,
fn: async () => {
const session = await Session.create({})
const messageID = MessageID.ascending()
await Session.updateMessage({
id: messageID,
sessionID: session.id,
role: "user",
time: { created: Date.now() },
agent: "user",
model: { providerID: "test", modelID: "test" },
tools: {},
mode: "",
} as unknown as MessageV2.Info)
let received: MessageV2.Part | undefined
const unsub = Bus.subscribe(MessageV2.Event.PartUpdated, (event) => {
received = event.properties.part
})
const tokens = {
total: 1500,
input: 500,
output: 800,
reasoning: 200,
cache: { read: 100, write: 50 },
}
const partInput = {
id: PartID.ascending(),
messageID,
sessionID: session.id,
type: "step-finish" as const,
reason: "stop",
cost: 0.005,
tokens,
}
await Session.updatePart(partInput)
await new Promise((resolve) => setTimeout(resolve, 100))
expect(received).toBeDefined()
expect(received!.type).toBe("step-finish")
const finish = received as MessageV2.StepFinishPart
expect(finish.tokens.input).toBe(500)
expect(finish.tokens.output).toBe(800)
expect(finish.tokens.reasoning).toBe(200)
expect(finish.tokens.total).toBe(1500)
expect(finish.tokens.cache.read).toBe(100)
expect(finish.tokens.cache.write).toBe(50)
expect(finish.cost).toBe(0.005)
expect(received).not.toBe(partInput)
unsub()
await Session.remove(session.id)
},
})
},
{ timeout: 30000 },
)
})

View File

@@ -0,0 +1,233 @@
import { describe, expect, test } from "bun:test"
import path from "path"
import { Session } from "../../src/session"
import { SessionPrompt } from "../../src/session/prompt"
import { Log } from "../../src/util/log"
import { Instance } from "../../src/project/instance"
import { MessageV2 } from "../../src/session/message-v2"
// Repo root — Instance context for these integration tests.
const projectRoot = path.join(__dirname, "../..")
Log.init({ print: false })
// Skip tests if no API key is available
const hasApiKey = !!process.env.ANTHROPIC_API_KEY
// Helper to run a test body within an Instance context rooted at the repo.
async function withInstance<T>(fn: () => Promise<T>): Promise<T> {
  return Instance.provide({
    directory: projectRoot,
    fn,
  })
}
describe("StructuredOutput Integration", () => {
test.skipIf(!hasApiKey)(
"produces structured output with simple schema",
async () => {
await withInstance(async () => {
const session = await Session.create({ title: "Structured Output Test" })
const result = await SessionPrompt.prompt({
sessionID: session.id,
parts: [
{
type: "text",
text: "What is 2 + 2? Provide a simple answer.",
},
],
format: {
type: "json_schema",
schema: {
type: "object",
properties: {
answer: { type: "number", description: "The numerical answer" },
explanation: { type: "string", description: "Brief explanation" },
},
required: ["answer"],
},
retryCount: 0,
},
})
// Verify structured output was captured (only on assistant messages)
expect(result.info.role).toBe("assistant")
if (result.info.role === "assistant") {
expect(result.info.structured).toBeDefined()
expect(typeof result.info.structured).toBe("object")
const output = result.info.structured as any
expect(output.answer).toBe(4)
// Verify no error was set
expect(result.info.error).toBeUndefined()
}
// Clean up
// Note: Not removing session to avoid race with background SessionSummary.summarize
})
},
60000,
)
test.skipIf(!hasApiKey)(
"produces structured output with nested objects",
async () => {
await withInstance(async () => {
const session = await Session.create({ title: "Nested Schema Test" })
const result = await SessionPrompt.prompt({
sessionID: session.id,
parts: [
{
type: "text",
text: "Tell me about Anthropic company in a structured format.",
},
],
format: {
type: "json_schema",
schema: {
type: "object",
properties: {
company: {
type: "object",
properties: {
name: { type: "string" },
founded: { type: "number" },
},
required: ["name", "founded"],
},
products: {
type: "array",
items: { type: "string" },
},
},
required: ["company"],
},
retryCount: 0,
},
})
// Verify structured output was captured (only on assistant messages)
expect(result.info.role).toBe("assistant")
if (result.info.role === "assistant") {
expect(result.info.structured).toBeDefined()
const output = result.info.structured as any
expect(output.company).toBeDefined()
expect(output.company.name).toBe("Anthropic")
expect(typeof output.company.founded).toBe("number")
if (output.products) {
expect(Array.isArray(output.products)).toBe(true)
}
// Verify no error was set
expect(result.info.error).toBeUndefined()
}
// Clean up
// Note: Not removing session to avoid race with background SessionSummary.summarize
})
},
60000,
)
test.skipIf(!hasApiKey)(
"works with text outputFormat (default)",
async () => {
await withInstance(async () => {
const session = await Session.create({ title: "Text Output Test" })
const result = await SessionPrompt.prompt({
sessionID: session.id,
parts: [
{
type: "text",
text: "Say hello.",
},
],
format: {
type: "text",
},
})
// Verify no structured output (text mode) and no error
expect(result.info.role).toBe("assistant")
if (result.info.role === "assistant") {
expect(result.info.structured).toBeUndefined()
expect(result.info.error).toBeUndefined()
}
// Verify we got a response with parts
expect(result.parts.length).toBeGreaterThan(0)
// Clean up
// Note: Not removing session to avoid race with background SessionSummary.summarize
})
},
60000,
)
test.skipIf(!hasApiKey)(
"stores outputFormat on user message",
async () => {
await withInstance(async () => {
const session = await Session.create({ title: "OutputFormat Storage Test" })
await SessionPrompt.prompt({
sessionID: session.id,
parts: [
{
type: "text",
text: "What is 1 + 1?",
},
],
format: {
type: "json_schema",
schema: {
type: "object",
properties: {
result: { type: "number" },
},
required: ["result"],
},
retryCount: 3,
},
})
// Get all messages from session
const messages = await Session.messages({ sessionID: session.id })
const userMessage = messages.find((m) => m.info.role === "user")
// Verify outputFormat was stored on user message
expect(userMessage).toBeDefined()
if (userMessage?.info.role === "user") {
expect(userMessage.info.format).toBeDefined()
expect(userMessage.info.format?.type).toBe("json_schema")
if (userMessage.info.format?.type === "json_schema") {
expect(userMessage.info.format.retryCount).toBe(3)
}
}
// Clean up
// Note: Not removing session to avoid race with background SessionSummary.summarize
})
},
60000,
)
test("unit test: StructuredOutputError is properly structured", () => {
const error = new MessageV2.StructuredOutputError({
message: "Failed to produce valid structured output after 3 attempts",
retries: 3,
})
expect(error.name).toBe("StructuredOutputError")
expect(error.data.message).toContain("3 attempts")
expect(error.data.retries).toBe(3)
const obj = error.toObject()
expect(obj.name).toBe("StructuredOutputError")
expect(obj.data.retries).toBe(3)
})
})

View File

@@ -0,0 +1,386 @@
import { describe, expect, test } from "bun:test"
import { MessageV2 } from "../../src/session/message-v2"
import { SessionPrompt } from "../../src/session/prompt"
import { SessionID, MessageID } from "../../src/session/schema"
describe("structured-output.OutputFormat", () => {
test("parses text format", () => {
const result = MessageV2.Format.safeParse({ type: "text" })
expect(result.success).toBe(true)
if (result.success) {
expect(result.data.type).toBe("text")
}
})
test("parses json_schema format with defaults", () => {
const result = MessageV2.Format.safeParse({
type: "json_schema",
schema: { type: "object", properties: { name: { type: "string" } } },
})
expect(result.success).toBe(true)
if (result.success) {
expect(result.data.type).toBe("json_schema")
if (result.data.type === "json_schema") {
expect(result.data.retryCount).toBe(2) // default value
}
}
})
test("parses json_schema format with custom retryCount", () => {
const result = MessageV2.Format.safeParse({
type: "json_schema",
schema: { type: "object" },
retryCount: 5,
})
expect(result.success).toBe(true)
if (result.success && result.data.type === "json_schema") {
expect(result.data.retryCount).toBe(5)
}
})
test("rejects invalid type", () => {
const result = MessageV2.Format.safeParse({ type: "invalid" })
expect(result.success).toBe(false)
})
test("rejects json_schema without schema", () => {
const result = MessageV2.Format.safeParse({ type: "json_schema" })
expect(result.success).toBe(false)
})
test("rejects negative retryCount", () => {
const result = MessageV2.Format.safeParse({
type: "json_schema",
schema: { type: "object" },
retryCount: -1,
})
expect(result.success).toBe(false)
})
})
describe("structured-output.StructuredOutputError", () => {
test("creates error with message and retries", () => {
const error = new MessageV2.StructuredOutputError({
message: "Failed to validate",
retries: 3,
})
expect(error.name).toBe("StructuredOutputError")
expect(error.data.message).toBe("Failed to validate")
expect(error.data.retries).toBe(3)
})
test("converts to object correctly", () => {
const error = new MessageV2.StructuredOutputError({
message: "Test error",
retries: 2,
})
const obj = error.toObject()
expect(obj.name).toBe("StructuredOutputError")
expect(obj.data.message).toBe("Test error")
expect(obj.data.retries).toBe(2)
})
test("isInstance correctly identifies error", () => {
const error = new MessageV2.StructuredOutputError({
message: "Test",
retries: 1,
})
expect(MessageV2.StructuredOutputError.isInstance(error)).toBe(true)
expect(MessageV2.StructuredOutputError.isInstance({ name: "other" })).toBe(false)
})
})
describe("structured-output.UserMessage", () => {
test("user message accepts outputFormat", () => {
const result = MessageV2.User.safeParse({
id: MessageID.ascending(),
sessionID: SessionID.descending(),
role: "user",
time: { created: Date.now() },
agent: "default",
model: { providerID: "anthropic", modelID: "claude-3" },
outputFormat: {
type: "json_schema",
schema: { type: "object" },
},
})
expect(result.success).toBe(true)
})
test("user message works without outputFormat (optional)", () => {
const result = MessageV2.User.safeParse({
id: MessageID.ascending(),
sessionID: SessionID.descending(),
role: "user",
time: { created: Date.now() },
agent: "default",
model: { providerID: "anthropic", modelID: "claude-3" },
})
expect(result.success).toBe(true)
})
})
describe("structured-output.AssistantMessage", () => {
const baseAssistantMessage = {
id: MessageID.ascending(),
sessionID: SessionID.descending(),
role: "assistant" as const,
parentID: MessageID.ascending(),
modelID: "claude-3",
providerID: "anthropic",
mode: "default",
agent: "default",
path: { cwd: "/test", root: "/test" },
cost: 0.001,
tokens: { input: 100, output: 50, reasoning: 0, cache: { read: 0, write: 0 } },
time: { created: Date.now() },
}
test("assistant message accepts structured", () => {
const result = MessageV2.Assistant.safeParse({
...baseAssistantMessage,
structured: { company: "Anthropic", founded: 2021 },
})
expect(result.success).toBe(true)
if (result.success) {
expect(result.data.structured).toEqual({ company: "Anthropic", founded: 2021 })
}
})
test("assistant message works without structured_output (optional)", () => {
const result = MessageV2.Assistant.safeParse(baseAssistantMessage)
expect(result.success).toBe(true)
})
})
describe("structured-output.createStructuredOutputTool", () => {
test("creates tool with correct id", () => {
const tool = SessionPrompt.createStructuredOutputTool({
schema: { type: "object", properties: { name: { type: "string" } } },
onSuccess: () => {},
})
// AI SDK tool type doesn't expose id, but we set it internally
expect((tool as any).id).toBe("StructuredOutput")
})
test("creates tool with description", () => {
const tool = SessionPrompt.createStructuredOutputTool({
schema: { type: "object" },
onSuccess: () => {},
})
expect(tool.description).toContain("structured format")
})
test("creates tool with schema as inputSchema", () => {
const schema = {
type: "object",
properties: {
company: { type: "string" },
founded: { type: "number" },
},
required: ["company"],
}
const tool = SessionPrompt.createStructuredOutputTool({
schema,
onSuccess: () => {},
})
// AI SDK wraps schema in { jsonSchema: {...} }
expect(tool.inputSchema).toBeDefined()
const inputSchema = tool.inputSchema as any
expect(inputSchema.jsonSchema?.properties?.company).toBeDefined()
expect(inputSchema.jsonSchema?.properties?.founded).toBeDefined()
})
test("strips $schema property from inputSchema", () => {
const schema = {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: { name: { type: "string" } },
}
const tool = SessionPrompt.createStructuredOutputTool({
schema,
onSuccess: () => {},
})
// AI SDK wraps schema in { jsonSchema: {...} }
const inputSchema = tool.inputSchema as any
expect(inputSchema.jsonSchema?.$schema).toBeUndefined()
})
test("execute calls onSuccess with valid args", async () => {
let capturedOutput: unknown
const tool = SessionPrompt.createStructuredOutputTool({
schema: { type: "object", properties: { name: { type: "string" } } },
onSuccess: (output) => {
capturedOutput = output
},
})
expect(tool.execute).toBeDefined()
const testArgs = { name: "Test Company" }
const result = await tool.execute!(testArgs, {
toolCallId: "test-call-id",
messages: [],
abortSignal: undefined as any,
})
expect(capturedOutput).toEqual(testArgs)
expect(result.output).toBe("Structured output captured successfully.")
expect(result.metadata.valid).toBe(true)
})
test("AI SDK validates schema before execute - missing required field", async () => {
// Note: The AI SDK validates the input against the schema BEFORE calling execute()
// So invalid inputs never reach the tool's execute function
// This test documents the expected schema behavior
const tool = SessionPrompt.createStructuredOutputTool({
schema: {
type: "object",
properties: {
name: { type: "string" },
age: { type: "number" },
},
required: ["name", "age"],
},
onSuccess: () => {},
})
// The schema requires both 'name' and 'age'
expect(tool.inputSchema).toBeDefined()
const inputSchema = tool.inputSchema as any
expect(inputSchema.jsonSchema?.required).toContain("name")
expect(inputSchema.jsonSchema?.required).toContain("age")
})
test("AI SDK validates schema types before execute - wrong type", async () => {
// Note: The AI SDK validates the input against the schema BEFORE calling execute()
// So invalid inputs never reach the tool's execute function
// This test documents the expected schema behavior
const tool = SessionPrompt.createStructuredOutputTool({
schema: {
type: "object",
properties: {
count: { type: "number" },
},
required: ["count"],
},
onSuccess: () => {},
})
// The schema defines 'count' as a number
expect(tool.inputSchema).toBeDefined()
const inputSchema = tool.inputSchema as any
expect(inputSchema.jsonSchema?.properties?.count?.type).toBe("number")
})
test("execute handles nested objects", async () => {
let capturedOutput: unknown
const tool = SessionPrompt.createStructuredOutputTool({
schema: {
type: "object",
properties: {
user: {
type: "object",
properties: {
name: { type: "string" },
email: { type: "string" },
},
required: ["name"],
},
},
required: ["user"],
},
onSuccess: (output) => {
capturedOutput = output
},
})
// Valid nested object - AI SDK validates before calling execute()
const validResult = await tool.execute!(
{ user: { name: "John", email: "john@test.com" } },
{
toolCallId: "test-call-id",
messages: [],
abortSignal: undefined as any,
},
)
expect(capturedOutput).toEqual({ user: { name: "John", email: "john@test.com" } })
expect(validResult.metadata.valid).toBe(true)
// Verify schema has correct nested structure
const inputSchema = tool.inputSchema as any
expect(inputSchema.jsonSchema?.properties?.user?.type).toBe("object")
expect(inputSchema.jsonSchema?.properties?.user?.properties?.name?.type).toBe("string")
expect(inputSchema.jsonSchema?.properties?.user?.required).toContain("name")
})
test("execute handles arrays", async () => {
let capturedOutput: unknown
const tool = SessionPrompt.createStructuredOutputTool({
schema: {
type: "object",
properties: {
tags: {
type: "array",
items: { type: "string" },
},
},
required: ["tags"],
},
onSuccess: (output) => {
capturedOutput = output
},
})
// Valid array - AI SDK validates before calling execute()
const validResult = await tool.execute!(
{ tags: ["a", "b", "c"] },
{
toolCallId: "test-call-id",
messages: [],
abortSignal: undefined as any,
},
)
expect(capturedOutput).toEqual({ tags: ["a", "b", "c"] })
expect(validResult.metadata.valid).toBe(true)
// Verify schema has correct array structure
const inputSchema = tool.inputSchema as any
expect(inputSchema.jsonSchema?.properties?.tags?.type).toBe("array")
expect(inputSchema.jsonSchema?.properties?.tags?.items?.type).toBe("string")
})
test("toModelOutput returns text value", () => {
const tool = SessionPrompt.createStructuredOutputTool({
schema: { type: "object" },
onSuccess: () => {},
})
expect(tool.toModelOutput).toBeDefined()
const modelOutput = tool.toModelOutput!({
output: "Test output",
title: "Test",
metadata: { valid: true },
})
expect(modelOutput.type).toBe("text")
expect(modelOutput.value).toBe("Test output")
})
// Note: Retry behavior is handled by the AI SDK and the prompt loop, not the tool itself
// The tool simply calls onSuccess when execute() is called with valid args
// See prompt.ts loop() for actual retry logic
})

View File

@@ -0,0 +1,59 @@
import { describe, expect, test } from "bun:test"
import path from "path"
import { Agent } from "../../src/agent/agent"
import { Instance } from "../../src/project/instance"
import { SystemPrompt } from "../../src/session/system"
import { tmpdir } from "../fixture/fixture"
describe("session.system", () => {
test("skills output is sorted by name and stable across calls", async () => {
await using tmp = await tmpdir({
git: true,
init: async (dir) => {
for (const [name, description] of [
["zeta-skill", "Zeta skill."],
["alpha-skill", "Alpha skill."],
["middle-skill", "Middle skill."],
]) {
const skillDir = path.join(dir, ".opencode", "skill", name)
await Bun.write(
path.join(skillDir, "SKILL.md"),
`---
name: ${name}
description: ${description}
---
# ${name}
`,
)
}
},
})
const home = process.env.OPENCODE_TEST_HOME
process.env.OPENCODE_TEST_HOME = tmp.path
try {
await Instance.provide({
directory: tmp.path,
fn: async () => {
const build = await Agent.get("build")
const first = await SystemPrompt.skills(build!)
const second = await SystemPrompt.skills(build!)
expect(first).toBe(second)
const alpha = first!.indexOf("<name>alpha-skill</name>")
const middle = first!.indexOf("<name>middle-skill</name>")
const zeta = first!.indexOf("<name>zeta-skill</name>")
expect(alpha).toBeGreaterThan(-1)
expect(middle).toBeGreaterThan(alpha)
expect(zeta).toBeGreaterThan(middle)
},
})
} finally {
process.env.OPENCODE_TEST_HOME = home
}
})
})