mirror of
https://gitea.toothfairyai.com/ToothFairyAI/tf_code.git
synced 2026-04-07 09:18:41 +00:00
refactor: apply minimal tfcode branding
- Rename packages/opencode → packages/tfcode (directory only)
- Rename bin/opencode → bin/tfcode (CLI binary)
- Rename .opencode → .tfcode (config directory)
- Update package.json name and bin field
- Update config directory path references (.tfcode)
- Keep internal code references as 'opencode' for easy upstream sync
- Keep @opencode-ai/* workspace package names

This minimal branding approach allows clean merges from the upstream opencode repository while providing tfcode branding for users.
This commit is contained in:
447
packages/tfcode/test/provider/amazon-bedrock.test.ts
Normal file
447
packages/tfcode/test/provider/amazon-bedrock.test.ts
Normal file
@@ -0,0 +1,447 @@
|
||||
import { test, expect, describe } from "bun:test"
|
||||
import path from "path"
|
||||
import { unlink } from "fs/promises"
|
||||
|
||||
import { ProviderID } from "../../src/provider/schema"
|
||||
import { tmpdir } from "../fixture/fixture"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Provider } from "../../src/provider/provider"
|
||||
import { Env } from "../../src/env"
|
||||
import { Global } from "../../src/global"
|
||||
import { Filesystem } from "../../src/util/filesystem"
|
||||
|
||||
test("Bedrock: config region takes precedence over AWS_REGION env var", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
region: "eu-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_REGION", "us-east-1")
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].options?.region).toBe("eu-west-1")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("Bedrock: falls back to AWS_REGION env var when no config region", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_REGION", "eu-west-1")
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].options?.region).toBe("eu-west-1")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("Bedrock: loads when bearer token from auth.json is present", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
region: "eu-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
|
||||
const authPath = path.join(Global.Path.data, "auth.json")
|
||||
|
||||
// Save original auth.json if it exists
|
||||
let originalAuth: string | undefined
|
||||
try {
|
||||
originalAuth = await Filesystem.readText(authPath)
|
||||
} catch {
|
||||
// File doesn't exist, that's fine
|
||||
}
|
||||
|
||||
try {
|
||||
// Write test auth.json
|
||||
await Filesystem.write(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
"amazon-bedrock": {
|
||||
type: "api",
|
||||
key: "test-bearer-token",
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_PROFILE", "")
|
||||
Env.set("AWS_ACCESS_KEY_ID", "")
|
||||
Env.set("AWS_BEARER_TOKEN_BEDROCK", "")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].options?.region).toBe("eu-west-1")
|
||||
},
|
||||
})
|
||||
} finally {
|
||||
// Restore original or delete
|
||||
if (originalAuth !== undefined) {
|
||||
await Filesystem.write(authPath, originalAuth)
|
||||
} else {
|
||||
try {
|
||||
await unlink(authPath)
|
||||
} catch {
|
||||
// Ignore errors if file doesn't exist
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
test("Bedrock: config profile takes precedence over AWS_PROFILE env var", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
profile: "my-custom-profile",
|
||||
region: "us-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
Env.set("AWS_ACCESS_KEY_ID", "test-key-id")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].options?.region).toBe("us-east-1")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("Bedrock: includes custom endpoint in options when specified", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
endpoint: "https://bedrock-runtime.us-east-1.vpce-xxxxx.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].options?.endpoint).toBe(
|
||||
"https://bedrock-runtime.us-east-1.vpce-xxxxx.amazonaws.com",
|
||||
)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("Bedrock: autoloads when AWS_WEB_IDENTITY_TOKEN_FILE is present", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
region: "us-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/eks.amazonaws.com/serviceaccount/token")
|
||||
Env.set("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/my-eks-role")
|
||||
Env.set("AWS_PROFILE", "")
|
||||
Env.set("AWS_ACCESS_KEY_ID", "")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].options?.region).toBe("us-east-1")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
// Tests for cross-region inference profile prefix handling
|
||||
// Models from models.dev may come with prefixes already (e.g., us., eu., global.)
|
||||
// These should NOT be double-prefixed when passed to the SDK
|
||||
|
||||
test("Bedrock: model with us. prefix should not be double-prefixed", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
region: "us-east-1",
|
||||
},
|
||||
models: {
|
||||
"us.anthropic.claude-opus-4-5-20251101-v1:0": {
|
||||
name: "Claude Opus 4.5 (US)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
// The model should exist with the us. prefix
|
||||
expect(providers[ProviderID.amazonBedrock].models["us.anthropic.claude-opus-4-5-20251101-v1:0"]).toBeDefined()
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("Bedrock: model with global. prefix should not be prefixed", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
region: "us-east-1",
|
||||
},
|
||||
models: {
|
||||
"global.anthropic.claude-opus-4-5-20251101-v1:0": {
|
||||
name: "Claude Opus 4.5 (Global)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].models["global.anthropic.claude-opus-4-5-20251101-v1:0"]).toBeDefined()
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("Bedrock: model with eu. prefix should not be double-prefixed", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
region: "eu-west-1",
|
||||
},
|
||||
models: {
|
||||
"eu.anthropic.claude-opus-4-5-20251101-v1:0": {
|
||||
name: "Claude Opus 4.5 (EU)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
expect(providers[ProviderID.amazonBedrock].models["eu.anthropic.claude-opus-4-5-20251101-v1:0"]).toBeDefined()
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("Bedrock: model without prefix in US region should get us. prefix added", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Filesystem.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
"amazon-bedrock": {
|
||||
options: {
|
||||
region: "us-east-1",
|
||||
},
|
||||
models: {
|
||||
"anthropic.claude-opus-4-5-20251101-v1:0": {
|
||||
name: "Claude Opus 4.5",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("AWS_PROFILE", "default")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
|
||||
// Non-prefixed model should still be registered
|
||||
expect(providers[ProviderID.amazonBedrock].models["anthropic.claude-opus-4-5-20251101-v1:0"]).toBeDefined()
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
// Direct unit tests for cross-region inference profile prefix handling
|
||||
// These test the prefix detection logic used in getModel
|
||||
|
||||
describe("Bedrock cross-region prefix detection", () => {
|
||||
const crossRegionPrefixes = ["global.", "us.", "eu.", "jp.", "apac.", "au."]
|
||||
|
||||
test("should detect global. prefix", () => {
|
||||
const modelID = "global.anthropic.claude-opus-4-5-20251101-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(true)
|
||||
})
|
||||
|
||||
test("should detect us. prefix", () => {
|
||||
const modelID = "us.anthropic.claude-opus-4-5-20251101-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(true)
|
||||
})
|
||||
|
||||
test("should detect eu. prefix", () => {
|
||||
const modelID = "eu.anthropic.claude-opus-4-5-20251101-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(true)
|
||||
})
|
||||
|
||||
test("should detect jp. prefix", () => {
|
||||
const modelID = "jp.anthropic.claude-sonnet-4-20250514-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(true)
|
||||
})
|
||||
|
||||
test("should detect apac. prefix", () => {
|
||||
const modelID = "apac.anthropic.claude-sonnet-4-20250514-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(true)
|
||||
})
|
||||
|
||||
test("should detect au. prefix", () => {
|
||||
const modelID = "au.anthropic.claude-sonnet-4-5-20250929-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(true)
|
||||
})
|
||||
|
||||
test("should NOT detect prefix for non-prefixed model", () => {
|
||||
const modelID = "anthropic.claude-opus-4-5-20251101-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(false)
|
||||
})
|
||||
|
||||
test("should NOT detect prefix for amazon nova models", () => {
|
||||
const modelID = "amazon.nova-pro-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(false)
|
||||
})
|
||||
|
||||
test("should NOT detect prefix for cohere models", () => {
|
||||
const modelID = "cohere.command-r-plus-v1:0"
|
||||
const hasPrefix = crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))
|
||||
expect(hasPrefix).toBe(false)
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,523 @@
|
||||
import { convertToOpenAICompatibleChatMessages as convertToCopilotMessages } from "@/provider/sdk/copilot/chat/convert-to-openai-compatible-chat-messages"
|
||||
import { describe, test, expect } from "bun:test"
|
||||
|
||||
describe("system messages", () => {
|
||||
test("should convert system message content to string", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "system",
|
||||
content: "You are a helpful assistant with AGENTS.md instructions.",
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "system",
|
||||
content: "You are a helpful assistant with AGENTS.md instructions.",
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe("user messages", () => {
|
||||
test("should convert messages with only a text part to a string content", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "user",
|
||||
content: [{ type: "text", text: "Hello" }],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([{ role: "user", content: "Hello" }])
|
||||
})
|
||||
|
||||
test("should convert messages with image parts", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "Hello" },
|
||||
{
|
||||
type: "file",
|
||||
data: Buffer.from([0, 1, 2, 3]).toString("base64"),
|
||||
mediaType: "image/png",
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "Hello" },
|
||||
{
|
||||
type: "image_url",
|
||||
image_url: { url: "data:image/png;base64,AAECAw==" },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should convert messages with image parts from Uint8Array", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "Hi" },
|
||||
{
|
||||
type: "file",
|
||||
data: new Uint8Array([0, 1, 2, 3]),
|
||||
mediaType: "image/png",
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "Hi" },
|
||||
{
|
||||
type: "image_url",
|
||||
image_url: { url: "data:image/png;base64,AAECAw==" },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should handle URL-based images", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{
|
||||
type: "file",
|
||||
data: new URL("https://example.com/image.jpg"),
|
||||
mediaType: "image/*",
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{
|
||||
type: "image_url",
|
||||
image_url: { url: "https://example.com/image.jpg" },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should handle multiple text parts without flattening", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "Part 1" },
|
||||
{ type: "text", text: "Part 2" },
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: "Part 1" },
|
||||
{ type: "text", text: "Part 2" },
|
||||
],
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe("assistant messages", () => {
|
||||
test("should convert assistant text messages", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: "Hello back!" }],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: "Hello back!",
|
||||
tool_calls: undefined,
|
||||
reasoning_text: undefined,
|
||||
reasoning_opaque: undefined,
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should handle assistant message with null content when only tool calls", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "tool-call",
|
||||
toolCallId: "call1",
|
||||
toolName: "calculator",
|
||||
input: { a: 1, b: 2 },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: null,
|
||||
tool_calls: [
|
||||
{
|
||||
id: "call1",
|
||||
type: "function",
|
||||
function: {
|
||||
name: "calculator",
|
||||
arguments: JSON.stringify({ a: 1, b: 2 }),
|
||||
},
|
||||
},
|
||||
],
|
||||
reasoning_text: undefined,
|
||||
reasoning_opaque: undefined,
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should concatenate multiple text parts", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: "First part. " },
|
||||
{ type: "text", text: "Second part." },
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result[0].content).toBe("First part. Second part.")
|
||||
})
|
||||
})
|
||||
|
||||
describe("tool calls", () => {
|
||||
test("should stringify arguments to tool calls", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "tool-call",
|
||||
input: { foo: "bar123" },
|
||||
toolCallId: "quux",
|
||||
toolName: "thwomp",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "tool",
|
||||
content: [
|
||||
{
|
||||
type: "tool-result",
|
||||
toolCallId: "quux",
|
||||
toolName: "thwomp",
|
||||
output: { type: "json", value: { oof: "321rab" } },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: null,
|
||||
tool_calls: [
|
||||
{
|
||||
id: "quux",
|
||||
type: "function",
|
||||
function: {
|
||||
name: "thwomp",
|
||||
arguments: JSON.stringify({ foo: "bar123" }),
|
||||
},
|
||||
},
|
||||
],
|
||||
reasoning_text: undefined,
|
||||
reasoning_opaque: undefined,
|
||||
},
|
||||
{
|
||||
role: "tool",
|
||||
tool_call_id: "quux",
|
||||
content: JSON.stringify({ oof: "321rab" }),
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should handle text output type in tool results", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "tool",
|
||||
content: [
|
||||
{
|
||||
type: "tool-result",
|
||||
toolCallId: "call-1",
|
||||
toolName: "getWeather",
|
||||
output: { type: "text", value: "It is sunny today" },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "tool",
|
||||
tool_call_id: "call-1",
|
||||
content: "It is sunny today",
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should handle multiple tool results as separate messages", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "tool",
|
||||
content: [
|
||||
{
|
||||
type: "tool-result",
|
||||
toolCallId: "call1",
|
||||
toolName: "api1",
|
||||
output: { type: "text", value: "Result 1" },
|
||||
},
|
||||
{
|
||||
type: "tool-result",
|
||||
toolCallId: "call2",
|
||||
toolName: "api2",
|
||||
output: { type: "text", value: "Result 2" },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toHaveLength(2)
|
||||
expect(result[0]).toEqual({
|
||||
role: "tool",
|
||||
tool_call_id: "call1",
|
||||
content: "Result 1",
|
||||
})
|
||||
expect(result[1]).toEqual({
|
||||
role: "tool",
|
||||
tool_call_id: "call2",
|
||||
content: "Result 2",
|
||||
})
|
||||
})
|
||||
|
||||
test("should handle text plus multiple tool calls", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "text", text: "Checking... " },
|
||||
{
|
||||
type: "tool-call",
|
||||
toolCallId: "call1",
|
||||
toolName: "searchTool",
|
||||
input: { query: "Weather" },
|
||||
},
|
||||
{ type: "text", text: "Almost there..." },
|
||||
{
|
||||
type: "tool-call",
|
||||
toolCallId: "call2",
|
||||
toolName: "mapsTool",
|
||||
input: { location: "Paris" },
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: "Checking... Almost there...",
|
||||
tool_calls: [
|
||||
{
|
||||
id: "call1",
|
||||
type: "function",
|
||||
function: {
|
||||
name: "searchTool",
|
||||
arguments: JSON.stringify({ query: "Weather" }),
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "call2",
|
||||
type: "function",
|
||||
function: {
|
||||
name: "mapsTool",
|
||||
arguments: JSON.stringify({ location: "Paris" }),
|
||||
},
|
||||
},
|
||||
],
|
||||
reasoning_text: undefined,
|
||||
reasoning_opaque: undefined,
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe("reasoning (copilot-specific)", () => {
|
||||
test("should omit reasoning_text without reasoning_opaque", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{ type: "reasoning", text: "Let me think about this..." },
|
||||
{ type: "text", text: "The answer is 42." },
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: "The answer is 42.",
|
||||
tool_calls: undefined,
|
||||
reasoning_text: undefined,
|
||||
reasoning_opaque: undefined,
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should include reasoning_opaque from providerOptions", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "reasoning",
|
||||
text: "Thinking...",
|
||||
providerOptions: {
|
||||
copilot: { reasoningOpaque: "opaque-signature-123" },
|
||||
},
|
||||
},
|
||||
{ type: "text", text: "Done!" },
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: "Done!",
|
||||
tool_calls: undefined,
|
||||
reasoning_text: "Thinking...",
|
||||
reasoning_opaque: "opaque-signature-123",
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should include reasoning_opaque from text part providerOptions", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: "Done!",
|
||||
providerOptions: {
|
||||
copilot: { reasoningOpaque: "opaque-text-456" },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: "Done!",
|
||||
tool_calls: undefined,
|
||||
reasoning_text: undefined,
|
||||
reasoning_opaque: "opaque-text-456",
|
||||
},
|
||||
])
|
||||
})
|
||||
|
||||
test("should handle reasoning-only assistant message", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "reasoning",
|
||||
text: "Just thinking, no response yet",
|
||||
providerOptions: {
|
||||
copilot: { reasoningOpaque: "sig-abc" },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: "assistant",
|
||||
content: null,
|
||||
tool_calls: undefined,
|
||||
reasoning_text: "Just thinking, no response yet",
|
||||
reasoning_opaque: "sig-abc",
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe("full conversation", () => {
|
||||
test("should convert a multi-turn conversation with reasoning", () => {
|
||||
const result = convertToCopilotMessages([
|
||||
{
|
||||
role: "system",
|
||||
content: "You are a helpful assistant.",
|
||||
},
|
||||
{
|
||||
role: "user",
|
||||
content: [{ type: "text", text: "What is 2+2?" }],
|
||||
},
|
||||
{
|
||||
role: "assistant",
|
||||
content: [
|
||||
{
|
||||
type: "reasoning",
|
||||
text: "Let me calculate 2+2...",
|
||||
providerOptions: {
|
||||
copilot: { reasoningOpaque: "sig-abc" },
|
||||
},
|
||||
},
|
||||
{ type: "text", text: "2+2 equals 4." },
|
||||
],
|
||||
},
|
||||
{
|
||||
role: "user",
|
||||
content: [{ type: "text", text: "What about 3+3?" }],
|
||||
},
|
||||
])
|
||||
|
||||
expect(result).toHaveLength(4)
|
||||
|
||||
const systemMsg = result[0]
|
||||
expect(systemMsg.role).toBe("system")
|
||||
|
||||
// Assistant message should have reasoning fields
|
||||
const assistantMsg = result[2] as {
|
||||
reasoning_text?: string
|
||||
reasoning_opaque?: string
|
||||
}
|
||||
expect(assistantMsg.reasoning_text).toBe("Let me calculate 2+2...")
|
||||
expect(assistantMsg.reasoning_opaque).toBe("sig-abc")
|
||||
})
|
||||
})
|
||||
592
packages/tfcode/test/provider/copilot/copilot-chat-model.test.ts
Normal file
592
packages/tfcode/test/provider/copilot/copilot-chat-model.test.ts
Normal file
@@ -0,0 +1,592 @@
|
||||
import { OpenAICompatibleChatLanguageModel } from "@/provider/sdk/copilot/chat/openai-compatible-chat-language-model"
|
||||
import { describe, test, expect, mock } from "bun:test"
|
||||
import type { LanguageModelV2Prompt } from "@ai-sdk/provider"
|
||||
|
||||
async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promise<T[]> {
|
||||
const reader = stream.getReader()
|
||||
const result: T[] = []
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
result.push(value)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Minimal single-turn user prompt shared by the streaming model tests below.
const TEST_PROMPT: LanguageModelV2Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
|
||||
|
||||
// Fixtures from copilot_test.exs
|
||||
const FIXTURES = {
|
||||
basicText: [
|
||||
`data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]}`,
|
||||
`data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"content":" world"},"finish_reason":null}]}`,
|
||||
`data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1677652288,"model":"gemini-2.0-flash-001","choices":[{"index":0,"delta":{"content":"!"},"finish_reason":"stop"}]}`,
|
||||
`data: [DONE]`,
|
||||
],
|
||||
|
||||
reasoningWithToolCalls: [
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Understanding Dayzee's Purpose**\\n\\nI'm starting to get a better handle on \`dayzee\`.\\n\\n"}}],"created":1764940861,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Assessing Dayzee's Functionality**\\n\\nI've reviewed the files.\\n\\n"}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"filePath\\":\\"/README.md\\"}","name":"read_file"},"id":"call_abc123","index":0,"type":"function"}],"reasoning_opaque":"4CUQ6696CwSXOdQ5rtvDimqA91tBzfmga4ieRbmZ5P67T2NLW3"}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"filePath\\":\\"/mix.exs\\"}","name":"read_file"},"id":"call_def456","index":1,"type":"function"}]}}],"created":1764940862,"id":"OdwyabKMI9yel7oPlbzgwQM","usage":{"completion_tokens":53,"prompt_tokens":19581,"prompt_tokens_details":{"cached_tokens":17068},"total_tokens":19768,"reasoning_tokens":134},"model":"gemini-3-pro-preview"}`,
|
||||
`data: [DONE]`,
|
||||
],
|
||||
|
||||
reasoningWithOpaqueAtEnd: [
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Analyzing the Inquiry's Nature**\\n\\nI'm currently parsing the user's question.\\n\\n"}}],"created":1765201729,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Reconciling User's Input**\\n\\nI'm grappling with the context.\\n\\n"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":"I am Tidewave, a highly skilled AI coding agent.\\n\\n","role":"assistant"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"finish_reason":"stop","index":0,"delta":{"content":"How can I help you?","role":"assistant","reasoning_opaque":"/PMlTqxqSJZnUBDHgnnJKLVI4eZQ"}}],"created":1765201730,"id":"Ptc2afqsCIHqlOoP653UiAI","usage":{"completion_tokens":59,"prompt_tokens":5778,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":5932,"reasoning_tokens":95},"model":"gemini-3-pro-preview"}`,
|
||||
`data: [DONE]`,
|
||||
],
|
||||
|
||||
// Case where reasoning_opaque and content come in the SAME chunk
|
||||
reasoningWithOpaqueAndContentSameChunk: [
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Understanding the Query's Nature**\\n\\nI'm currently grappling with the user's philosophical query.\\n\\n"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Framing the Response's Core**\\n\\nNow, I'm structuring my response.\\n\\n"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":"Of course. I'm thinking right now.","role":"assistant","reasoning_opaque":"ExXaGwW7jBo39OXRe9EPoFGN1rOtLJBx"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
|
||||
`data: {"choices":[{"finish_reason":"stop","index":0,"delta":{"content":" What's on your mind?","role":"assistant"}}],"created":1766062103,"id":"FPhDacixL9zrlOoPqLSuyQ4","usage":{"completion_tokens":78,"prompt_tokens":3767,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":3915,"reasoning_tokens":70},"model":"gemini-2.5-pro"}`,
|
||||
`data: [DONE]`,
|
||||
],
|
||||
|
||||
// Case where reasoning_opaque and content come in same chunk, followed by tool calls
|
||||
reasoningWithOpaqueContentAndToolCalls: [
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Analyzing the Structure**\\n\\nI'm currently trying to get a handle on the project's layout. My initial focus is on the file structure itself, specifically the directory organization. I'm hoping this will illuminate how different components interact. I'll need to identify the key modules and their dependencies.\\n\\n\\n"}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":"Okay, I need to check out the project's file structure.","role":"assistant","reasoning_opaque":"WHOd3dYFnxEBOsKUXjbX6c2rJa0fS214FHbsj+A3Q+i63SFo7H/92RsownAzyo0h2qEy3cOcrvAatsMx51eCKiMSqt4dYWZhd5YVSgF0CehkpDbWBP/SoRqLU1dhCmUJV/6b5uYFBOzKLBGNadyhI7T1gWFlXntwc6SNjH6DujnFPeVr+L8DdOoUJGJrw2aOfm9NtkXA6wZh9t7dt+831yIIImjD9MHczuXoXj8K7tyLpIJ9KlVXMhnO4IKSYNdKRtoHlGTmudAp5MgH/vLWb6oSsL+ZJl/OdF3WBOeanGhYNoByCRDSvR7anAR/9m5zf9yUax+u/nFg+gzmhFacnzZGtSmcvJ4/4HWKNtUkRASTKeN94DXB8j1ptB/i6ldaMAz2ZyU+sbjPWI8aI4fKJ2MuO01u3uE87xVwpWiM+0rahIzJsllI5edwOaOFtF4tnlCTQafbxHwCZR62uON2E+IjGzW80MzyfYrbLBJKS5zTeHCgPYQSNaKzPfpzkQvdwo3JUnJYcEHgGeKzkq5sbvS5qitCYI7Xue0V98S6/KnUSPnDQBjNnas2i6BqJV2vuCEU/Y3ucrlKVbuRIFCZXCyLzrsGeRLRKlrf5S/HDAQ04IOPQVQhBPvhX0nDjhZB"}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-2.5-pro"}`,
|
||||
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"list_project_files"},"id":"call_MHxqRDd5WVo3NU8wUXRaMmc0MFE","index":0,"type":"function"}]}}],"created":1766066995,"id":"MQtEafqbFYTZsbwPwuCVoAg","usage":{"completion_tokens":19,"prompt_tokens":3767,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":3797,"reasoning_tokens":11},"model":"gemini-2.5-pro"}`,
|
||||
`data: [DONE]`,
|
||||
],
|
||||
|
||||
// Case where reasoning goes directly to tool_calls with NO content
|
||||
// reasoning_opaque and tool_calls come in the same chunk
|
||||
reasoningDirectlyToToolCalls: [
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Executing and Analyzing HTML**\\n\\nI've successfully captured the HTML snapshot using the \`browser_eval\` tool, giving me a solid understanding of the page structure. Now, I'm shifting focus to Elixir code execution with \`project_eval\` to assess my ability to work within the project's environment.\\n\\n\\n"}}],"created":1766068643,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","reasoning_text":"**Testing Project Contexts**\\n\\nI've got the HTML body snapshot from \`browser_eval\`, which is a helpful reference. Next, I'm testing my ability to run Elixir code in the project with \`project_eval\`. I'm starting with a simple sum: \`1 + 1\`. This will confirm I'm set up to interact with the project's codebase.\\n\\n\\n"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-pro-preview"}`,
|
||||
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"code\\":\\"1 + 1\\"}","name":"project_eval"},"id":"call_MHw3RDhmT1J5Z3B6WlhpVjlveTc","index":0,"type":"function"}],"reasoning_opaque":"ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":12,"prompt_tokens":8677,"prompt_tokens_details":{"cached_tokens":3692},"total_tokens":8768,"reasoning_tokens":79},"model":"gemini-3-pro-preview"}`,
|
||||
`data: [DONE]`,
|
||||
],
|
||||
|
||||
reasoningOpaqueWithToolCallsNoReasoningText: [
|
||||
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"read_file"},"id":"call_reasoning_only","index":0,"type":"function"}],"reasoning_opaque":"opaque-xyz"}}],"created":1769917420,"id":"opaque-only","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-flash-preview"}`,
|
||||
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"read_file"},"id":"call_reasoning_only_2","index":1,"type":"function"}]}}],"created":1769917420,"id":"opaque-only","usage":{"completion_tokens":12,"prompt_tokens":123,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":135,"reasoning_tokens":0},"model":"gemini-3-flash-preview"}`,
|
||||
`data: [DONE]`,
|
||||
],
|
||||
}
|
||||
|
||||
function createMockFetch(chunks: string[]) {
|
||||
return mock(async () => {
|
||||
const body = new ReadableStream({
|
||||
start(controller) {
|
||||
for (const chunk of chunks) {
|
||||
controller.enqueue(new TextEncoder().encode(chunk + "\n\n"))
|
||||
}
|
||||
controller.close()
|
||||
},
|
||||
})
|
||||
|
||||
return new Response(body, {
|
||||
status: 200,
|
||||
headers: { "Content-Type": "text/event-stream" },
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
function createModel(fetchFn: ReturnType<typeof mock>) {
|
||||
return new OpenAICompatibleChatLanguageModel("test-model", {
|
||||
provider: "copilot.chat",
|
||||
url: () => "https://api.test.com/chat/completions",
|
||||
headers: () => ({ Authorization: "Bearer test-token" }),
|
||||
fetch: fetchFn as any,
|
||||
})
|
||||
}
|
||||
|
||||
describe("doStream", () => {
|
||||
test("should stream text deltas", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.basicText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// Filter to just the key events
|
||||
const textParts = parts.filter(
|
||||
(p) => p.type === "text-start" || p.type === "text-delta" || p.type === "text-end" || p.type === "finish",
|
||||
)
|
||||
|
||||
expect(textParts).toMatchObject([
|
||||
{ type: "text-start", id: "txt-0" },
|
||||
{ type: "text-delta", id: "txt-0", delta: "Hello" },
|
||||
{ type: "text-delta", id: "txt-0", delta: " world" },
|
||||
{ type: "text-delta", id: "txt-0", delta: "!" },
|
||||
{ type: "text-end", id: "txt-0" },
|
||||
{ type: "finish", finishReason: "stop" },
|
||||
])
|
||||
})
|
||||
|
||||
test("should stream reasoning with tool calls and capture reasoning_opaque", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningWithToolCalls)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// Check reasoning parts
|
||||
const reasoningParts = parts.filter(
|
||||
(p) => p.type === "reasoning-start" || p.type === "reasoning-delta" || p.type === "reasoning-end",
|
||||
)
|
||||
|
||||
expect(reasoningParts[0]).toEqual({
|
||||
type: "reasoning-start",
|
||||
id: "reasoning-0",
|
||||
})
|
||||
|
||||
expect(reasoningParts[1]).toMatchObject({
|
||||
type: "reasoning-delta",
|
||||
id: "reasoning-0",
|
||||
})
|
||||
expect((reasoningParts[1] as { delta: string }).delta).toContain("**Understanding Dayzee's Purpose**")
|
||||
|
||||
expect(reasoningParts[2]).toMatchObject({
|
||||
type: "reasoning-delta",
|
||||
id: "reasoning-0",
|
||||
})
|
||||
expect((reasoningParts[2] as { delta: string }).delta).toContain("**Assessing Dayzee's Functionality**")
|
||||
|
||||
// reasoning_opaque should be in reasoning-end providerMetadata
|
||||
const reasoningEnd = reasoningParts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: "4CUQ6696CwSXOdQ5rtvDimqA91tBzfmga4ieRbmZ5P67T2NLW3",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Check tool calls
|
||||
const toolParts = parts.filter(
|
||||
(p) => p.type === "tool-input-start" || p.type === "tool-call" || p.type === "tool-input-end",
|
||||
)
|
||||
|
||||
expect(toolParts).toContainEqual({
|
||||
type: "tool-input-start",
|
||||
id: "call_abc123",
|
||||
toolName: "read_file",
|
||||
})
|
||||
|
||||
expect(toolParts).toContainEqual(
|
||||
expect.objectContaining({
|
||||
type: "tool-call",
|
||||
toolCallId: "call_abc123",
|
||||
toolName: "read_file",
|
||||
}),
|
||||
)
|
||||
|
||||
expect(toolParts).toContainEqual({
|
||||
type: "tool-input-start",
|
||||
id: "call_def456",
|
||||
toolName: "read_file",
|
||||
})
|
||||
|
||||
// Check finish
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "tool-calls",
|
||||
usage: {
|
||||
inputTokens: 19581,
|
||||
outputTokens: 53,
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("should handle reasoning_opaque that comes at end with text in between", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueAtEnd)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// Check that reasoning comes first
|
||||
const reasoningStart = parts.findIndex((p) => p.type === "reasoning-start")
|
||||
const textStart = parts.findIndex((p) => p.type === "text-start")
|
||||
expect(reasoningStart).toBeLessThan(textStart)
|
||||
|
||||
// Check reasoning deltas
|
||||
const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
|
||||
expect(reasoningDeltas).toHaveLength(2)
|
||||
expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Analyzing the Inquiry's Nature**")
|
||||
expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Reconciling User's Input**")
|
||||
|
||||
// Check text deltas
|
||||
const textDeltas = parts.filter((p) => p.type === "text-delta")
|
||||
expect(textDeltas).toHaveLength(2)
|
||||
expect((textDeltas[0] as { delta: string }).delta).toContain("I am Tidewave")
|
||||
expect((textDeltas[1] as { delta: string }).delta).toContain("How can I help you?")
|
||||
|
||||
// reasoning-end should be emitted before text-start
|
||||
const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
|
||||
const textStartIndex = parts.findIndex((p) => p.type === "text-start")
|
||||
expect(reasoningEndIndex).toBeGreaterThan(-1)
|
||||
expect(reasoningEndIndex).toBeLessThan(textStartIndex)
|
||||
|
||||
// In this fixture, reasoning_opaque comes AFTER content has started (in chunk 4)
|
||||
// So it arrives too late to be attached to reasoning-end. But it should still
|
||||
// be captured and included in the finish event's providerMetadata.
|
||||
const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
})
|
||||
|
||||
// reasoning_opaque should be in the finish event's providerMetadata
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "stop",
|
||||
usage: {
|
||||
inputTokens: 5778,
|
||||
outputTokens: 59,
|
||||
},
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: "/PMlTqxqSJZnUBDHgnnJKLVI4eZQ",
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("should handle reasoning_opaque and content in the same chunk", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueAndContentSameChunk)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// The critical test: reasoning-end should come BEFORE text-start
|
||||
const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
|
||||
const textStartIndex = parts.findIndex((p) => p.type === "text-start")
|
||||
expect(reasoningEndIndex).toBeGreaterThan(-1)
|
||||
expect(textStartIndex).toBeGreaterThan(-1)
|
||||
expect(reasoningEndIndex).toBeLessThan(textStartIndex)
|
||||
|
||||
// Check reasoning deltas
|
||||
const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
|
||||
expect(reasoningDeltas).toHaveLength(2)
|
||||
expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Understanding the Query's Nature**")
|
||||
expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Framing the Response's Core**")
|
||||
|
||||
// reasoning_opaque should be in reasoning-end even though it came with content
|
||||
const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: "ExXaGwW7jBo39OXRe9EPoFGN1rOtLJBx",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Check text deltas
|
||||
const textDeltas = parts.filter((p) => p.type === "text-delta")
|
||||
expect(textDeltas).toHaveLength(2)
|
||||
expect((textDeltas[0] as { delta: string }).delta).toContain("Of course. I'm thinking right now.")
|
||||
expect((textDeltas[1] as { delta: string }).delta).toContain("What's on your mind?")
|
||||
|
||||
// Check finish
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "stop",
|
||||
})
|
||||
})
|
||||
|
||||
test("should handle reasoning_opaque and content followed by tool calls", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningWithOpaqueContentAndToolCalls)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// Check that reasoning comes first, then text, then tool calls
|
||||
const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
|
||||
const textStartIndex = parts.findIndex((p) => p.type === "text-start")
|
||||
const toolStartIndex = parts.findIndex((p) => p.type === "tool-input-start")
|
||||
|
||||
expect(reasoningEndIndex).toBeGreaterThan(-1)
|
||||
expect(textStartIndex).toBeGreaterThan(-1)
|
||||
expect(toolStartIndex).toBeGreaterThan(-1)
|
||||
expect(reasoningEndIndex).toBeLessThan(textStartIndex)
|
||||
expect(textStartIndex).toBeLessThan(toolStartIndex)
|
||||
|
||||
// Check reasoning content
|
||||
const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
|
||||
expect(reasoningDeltas).toHaveLength(1)
|
||||
expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Analyzing the Structure**")
|
||||
|
||||
// reasoning_opaque should be in reasoning-end (comes with content in same chunk)
|
||||
const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: expect.stringContaining("WHOd3dYFnxEBOsKUXjbX6c2rJa0fS214"),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Check text content
|
||||
const textDeltas = parts.filter((p) => p.type === "text-delta")
|
||||
expect(textDeltas).toHaveLength(1)
|
||||
expect((textDeltas[0] as { delta: string }).delta).toContain(
|
||||
"Okay, I need to check out the project's file structure.",
|
||||
)
|
||||
|
||||
// Check tool call
|
||||
const toolParts = parts.filter(
|
||||
(p) => p.type === "tool-input-start" || p.type === "tool-call" || p.type === "tool-input-end",
|
||||
)
|
||||
|
||||
expect(toolParts).toContainEqual({
|
||||
type: "tool-input-start",
|
||||
id: "call_MHxqRDd5WVo3NU8wUXRaMmc0MFE",
|
||||
toolName: "list_project_files",
|
||||
})
|
||||
|
||||
expect(toolParts).toContainEqual(
|
||||
expect.objectContaining({
|
||||
type: "tool-call",
|
||||
toolCallId: "call_MHxqRDd5WVo3NU8wUXRaMmc0MFE",
|
||||
toolName: "list_project_files",
|
||||
}),
|
||||
)
|
||||
|
||||
// Check finish
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "tool-calls",
|
||||
usage: {
|
||||
inputTokens: 3767,
|
||||
outputTokens: 19,
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("should emit reasoning-end before tool-input-start when reasoning goes directly to tool calls", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningDirectlyToToolCalls)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
// Critical check: reasoning-end MUST come before tool-input-start
|
||||
const reasoningEndIndex = parts.findIndex((p) => p.type === "reasoning-end")
|
||||
const toolStartIndex = parts.findIndex((p) => p.type === "tool-input-start")
|
||||
|
||||
expect(reasoningEndIndex).toBeGreaterThan(-1)
|
||||
expect(toolStartIndex).toBeGreaterThan(-1)
|
||||
expect(reasoningEndIndex).toBeLessThan(toolStartIndex)
|
||||
|
||||
// Check reasoning parts
|
||||
const reasoningDeltas = parts.filter((p) => p.type === "reasoning-delta")
|
||||
expect(reasoningDeltas).toHaveLength(2)
|
||||
expect((reasoningDeltas[0] as { delta: string }).delta).toContain("**Executing and Analyzing HTML**")
|
||||
expect((reasoningDeltas[1] as { delta: string }).delta).toContain("**Testing Project Contexts**")
|
||||
|
||||
// reasoning_opaque should be in reasoning-end providerMetadata
|
||||
const reasoningEnd = parts.find((p) => p.type === "reasoning-end")
|
||||
expect(reasoningEnd).toMatchObject({
|
||||
type: "reasoning-end",
|
||||
id: "reasoning-0",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: "ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// No text parts should exist
|
||||
const textParts = parts.filter((p) => p.type === "text-start" || p.type === "text-delta" || p.type === "text-end")
|
||||
expect(textParts).toHaveLength(0)
|
||||
|
||||
// Check tool call
|
||||
const toolCall = parts.find((p) => p.type === "tool-call")
|
||||
expect(toolCall).toMatchObject({
|
||||
type: "tool-call",
|
||||
toolCallId: "call_MHw3RDhmT1J5Z3B6WlhpVjlveTc",
|
||||
toolName: "project_eval",
|
||||
})
|
||||
|
||||
// Check finish
|
||||
const finish = parts.find((p) => p.type === "finish")
|
||||
expect(finish).toMatchObject({
|
||||
type: "finish",
|
||||
finishReason: "tool-calls",
|
||||
})
|
||||
})
|
||||
|
||||
test("should attach reasoning_opaque to tool calls without reasoning_text", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.reasoningOpaqueWithToolCallsNoReasoningText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
const reasoningParts = parts.filter(
|
||||
(p) => p.type === "reasoning-start" || p.type === "reasoning-delta" || p.type === "reasoning-end",
|
||||
)
|
||||
|
||||
expect(reasoningParts).toHaveLength(0)
|
||||
|
||||
const toolCall = parts.find((p) => p.type === "tool-call" && p.toolCallId === "call_reasoning_only")
|
||||
expect(toolCall).toMatchObject({
|
||||
type: "tool-call",
|
||||
toolCallId: "call_reasoning_only",
|
||||
toolName: "read_file",
|
||||
providerMetadata: {
|
||||
copilot: {
|
||||
reasoningOpaque: "opaque-xyz",
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("should include response metadata from first chunk", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.basicText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
const metadata = parts.find((p) => p.type === "response-metadata")
|
||||
expect(metadata).toMatchObject({
|
||||
type: "response-metadata",
|
||||
id: "chatcmpl-123",
|
||||
modelId: "gemini-2.0-flash-001",
|
||||
})
|
||||
})
|
||||
|
||||
test("should emit stream-start with warnings", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.basicText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
const streamStart = parts.find((p) => p.type === "stream-start")
|
||||
expect(streamStart).toEqual({
|
||||
type: "stream-start",
|
||||
warnings: [],
|
||||
})
|
||||
})
|
||||
|
||||
test("should include raw chunks when requested", async () => {
|
||||
const mockFetch = createMockFetch(FIXTURES.basicText)
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
const { stream } = await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
includeRawChunks: true,
|
||||
})
|
||||
|
||||
const parts = await convertReadableStreamToArray(stream)
|
||||
|
||||
const rawChunks = parts.filter((p) => p.type === "raw")
|
||||
expect(rawChunks.length).toBeGreaterThan(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe("request body", () => {
|
||||
test("should send tools in OpenAI format", async () => {
|
||||
let capturedBody: unknown
|
||||
const mockFetch = mock(async (_url: string, init?: RequestInit) => {
|
||||
capturedBody = JSON.parse(init?.body as string)
|
||||
return new Response(
|
||||
new ReadableStream({
|
||||
start(controller) {
|
||||
controller.enqueue(new TextEncoder().encode(`data: [DONE]\n\n`))
|
||||
controller.close()
|
||||
},
|
||||
}),
|
||||
{ status: 200, headers: { "Content-Type": "text/event-stream" } },
|
||||
)
|
||||
})
|
||||
|
||||
const model = createModel(mockFetch)
|
||||
|
||||
await model.doStream({
|
||||
prompt: TEST_PROMPT,
|
||||
tools: [
|
||||
{
|
||||
type: "function",
|
||||
name: "get_weather",
|
||||
description: "Get the weather for a location",
|
||||
inputSchema: {
|
||||
type: "object",
|
||||
properties: {
|
||||
location: { type: "string" },
|
||||
},
|
||||
required: ["location"],
|
||||
},
|
||||
},
|
||||
],
|
||||
includeRawChunks: false,
|
||||
})
|
||||
|
||||
expect((capturedBody as { tools: unknown[] }).tools).toEqual([
|
||||
{
|
||||
type: "function",
|
||||
function: {
|
||||
name: "get_weather",
|
||||
description: "Get the weather for a location",
|
||||
parameters: {
|
||||
type: "object",
|
||||
properties: {
|
||||
location: { type: "string" },
|
||||
},
|
||||
required: ["location"],
|
||||
},
|
||||
},
|
||||
},
|
||||
])
|
||||
})
|
||||
})
|
||||
408
packages/tfcode/test/provider/gitlab-duo.test.ts
Normal file
408
packages/tfcode/test/provider/gitlab-duo.test.ts
Normal file
@@ -0,0 +1,408 @@
|
||||
import { test, expect, describe } from "bun:test"
|
||||
import path from "path"
|
||||
|
||||
import { ProviderID, ModelID } from "../../src/provider/schema"
|
||||
import { tmpdir } from "../fixture/fixture"
|
||||
import { Instance } from "../../src/project/instance"
|
||||
import { Provider } from "../../src/provider/provider"
|
||||
import { Env } from "../../src/env"
|
||||
import { Global } from "../../src/global"
|
||||
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
|
||||
|
||||
test("GitLab Duo: loads provider with API key from environment", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-gitlab-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
expect(providers[ProviderID.gitlab].key).toBe("test-gitlab-token")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: config instanceUrl option sets baseURL", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
gitlab: {
|
||||
options: {
|
||||
instanceUrl: "https://gitlab.example.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
Env.set("GITLAB_INSTANCE_URL", "https://gitlab.example.com")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.example.com")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: loads with OAuth token from auth.json", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
|
||||
const authPath = path.join(Global.Path.data, "auth.json")
|
||||
await Bun.write(
|
||||
authPath,
|
||||
JSON.stringify({
|
||||
gitlab: {
|
||||
type: "oauth",
|
||||
access: "test-access-token",
|
||||
refresh: "test-refresh-token",
|
||||
expires: Date.now() + 3600000,
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: loads with Personal Access Token from auth.json", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
|
||||
const authPath2 = path.join(Global.Path.data, "auth.json")
|
||||
await Bun.write(
|
||||
authPath2,
|
||||
JSON.stringify({
|
||||
gitlab: {
|
||||
type: "api",
|
||||
key: "glpat-test-pat-token",
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
expect(providers[ProviderID.gitlab].key).toBe("glpat-test-pat-token")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: supports self-hosted instance configuration", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
gitlab: {
|
||||
options: {
|
||||
instanceUrl: "https://gitlab.company.internal",
|
||||
apiKey: "glpat-internal-token",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_INSTANCE_URL", "https://gitlab.company.internal")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.company.internal")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: config apiKey takes precedence over environment variable", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
gitlab: {
|
||||
options: {
|
||||
apiKey: "config-token",
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "env-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: includes context-1m beta header in aiGatewayHeaders", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
expect(providers[ProviderID.gitlab].options?.aiGatewayHeaders?.["anthropic-beta"]).toContain(
|
||||
"context-1m-2025-08-07",
|
||||
)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: supports feature flags configuration", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
provider: {
|
||||
gitlab: {
|
||||
options: {
|
||||
featureFlags: {
|
||||
duo_agent_platform_agentic_chat: true,
|
||||
duo_agent_platform: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
expect(providers[ProviderID.gitlab].options?.featureFlags).toBeDefined()
|
||||
expect(providers[ProviderID.gitlab].options?.featureFlags?.duo_agent_platform_agentic_chat).toBe(true)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("GitLab Duo: has multiple agentic chat models available", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(
|
||||
path.join(dir, "opencode.json"),
|
||||
JSON.stringify({
|
||||
$schema: "https://opencode.ai/config.json",
|
||||
}),
|
||||
)
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
const models = Object.keys(providers[ProviderID.gitlab].models)
|
||||
expect(models.length).toBeGreaterThan(0)
|
||||
expect(models).toContain("duo-chat-haiku-4-5")
|
||||
expect(models).toContain("duo-chat-sonnet-4-5")
|
||||
expect(models).toContain("duo-chat-opus-4-5")
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
describe("GitLab Duo: workflow model routing", () => {
|
||||
test("duo-workflow-* model routes through workflowChat", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
const gitlab = providers[ProviderID.gitlab]
|
||||
expect(gitlab).toBeDefined()
|
||||
gitlab.models["duo-workflow-sonnet-4-6"] = {
|
||||
id: ModelID.make("duo-workflow-sonnet-4-6"),
|
||||
providerID: ProviderID.make("gitlab"),
|
||||
name: "Agent Platform (Claude Sonnet 4.6)",
|
||||
family: "",
|
||||
api: { id: "duo-workflow-sonnet-4-6", url: "https://gitlab.com", npm: "gitlab-ai-provider" },
|
||||
status: "active",
|
||||
headers: {},
|
||||
options: { workflowRef: "claude_sonnet_4_6" },
|
||||
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
|
||||
limit: { context: 200000, output: 64000 },
|
||||
capabilities: {
|
||||
temperature: false,
|
||||
reasoning: true,
|
||||
attachment: true,
|
||||
toolcall: true,
|
||||
input: { text: true, audio: false, image: true, video: false, pdf: true },
|
||||
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
||||
interleaved: false,
|
||||
},
|
||||
release_date: "",
|
||||
variants: {},
|
||||
}
|
||||
const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-workflow-sonnet-4-6"))
|
||||
expect(model).toBeDefined()
|
||||
expect(model.options?.workflowRef).toBe("claude_sonnet_4_6")
|
||||
const language = await Provider.getLanguage(model)
|
||||
expect(language).toBeDefined()
|
||||
expect(language).toBeInstanceOf(GitLabWorkflowLanguageModel)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("duo-chat-* model routes through agenticChat (not workflow)", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
expect(providers[ProviderID.gitlab]).toBeDefined()
|
||||
const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
|
||||
expect(model).toBeDefined()
|
||||
const language = await Provider.getLanguage(model)
|
||||
expect(language).toBeDefined()
|
||||
expect(language).not.toBeInstanceOf(GitLabWorkflowLanguageModel)
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
test("model.options merged with provider.options in getLanguage", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
const gitlab = providers[ProviderID.gitlab]
|
||||
expect(gitlab.options?.featureFlags).toBeDefined()
|
||||
const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
|
||||
expect(model).toBeDefined()
|
||||
expect(model.options).toBeDefined()
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe("GitLab Duo: static models", () => {
|
||||
test("static duo-chat models always present regardless of discovery", async () => {
|
||||
await using tmp = await tmpdir({
|
||||
init: async (dir) => {
|
||||
await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
|
||||
},
|
||||
})
|
||||
await Instance.provide({
|
||||
directory: tmp.path,
|
||||
init: async () => {
|
||||
Env.set("GITLAB_TOKEN", "test-token")
|
||||
},
|
||||
fn: async () => {
|
||||
const providers = await Provider.list()
|
||||
const models = Object.keys(providers[ProviderID.gitlab].models)
|
||||
expect(models).toContain("duo-chat-haiku-4-5")
|
||||
expect(models).toContain("duo-chat-sonnet-4-5")
|
||||
expect(models).toContain("duo-chat-opus-4-5")
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
2284
packages/tfcode/test/provider/provider.test.ts
Normal file
2284
packages/tfcode/test/provider/provider.test.ts
Normal file
File diff suppressed because it is too large
Load Diff
2721
packages/tfcode/test/provider/transform.test.ts
Normal file
2721
packages/tfcode/test/provider/transform.test.ts
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user