mirror of
https://gitea.toothfairyai.com/ToothFairyAI/tf_code.git
synced 2026-03-30 13:54:01 +00:00
- Rename packages/opencode → packages/tfcode (directory only) - Rename bin/opencode → bin/tfcode (CLI binary) - Rename .opencode → .tfcode (config directory) - Update package.json name and bin field - Update config directory path references (.tfcode) - Keep internal code references as 'opencode' for easy upstream sync - Keep @opencode-ai/* workspace package names This minimal branding approach allows clean merges from upstream opencode repository while providing tfcode branding for users.
2722 lines
78 KiB
TypeScript
2722 lines
78 KiB
TypeScript
import { describe, expect, test } from "bun:test"
|
|
import { ProviderTransform } from "../../src/provider/transform"
|
|
import { ModelID, ProviderID } from "../../src/provider/schema"
|
|
|
|
// Shared output-token budget constant.
// NOTE(review): not referenced anywhere in the visible portion of this chunk —
// presumably used by tests further down the file; confirm before removing.
const OUTPUT_TOKEN_MAX = 32000
|
|
|
|
// Verifies when ProviderTransform.options emits `promptCacheKey`:
// - anthropic models: only when providerOptions.setCacheKey is explicitly true
//   (the key value is the sessionID);
// - openai models: always set, regardless of setCacheKey, and `store` is
//   forced to false.
describe("ProviderTransform.options - setCacheKey", () => {
  const sessionID = "test-session-123"

  // Minimal anthropic model fixture; only providerID/api.npm matter for the
  // cache-key branching under test, the rest satisfies the model shape.
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any

  test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: { setCacheKey: true },
    })
    expect(result.promptCacheKey).toBe(sessionID)
  })

  test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: { setCacheKey: false },
    })
    expect(result.promptCacheKey).toBeUndefined()
  })

  test("should not set promptCacheKey when providerOptions is undefined", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: undefined,
    })
    expect(result.promptCacheKey).toBeUndefined()
  })

  test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
    const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
    expect(result.promptCacheKey).toBeUndefined()
  })

  // openai gets the cache key unconditionally (no setCacheKey flag needed).
  test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
    const openaiModel = {
      ...mockModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
    expect(result.promptCacheKey).toBe(sessionID)
  })

  test("should set store=false for openai provider", () => {
    const openaiModel = {
      ...mockModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const result = ProviderTransform.options({
      model: openaiModel,
      sessionID,
      providerOptions: {},
    })
    expect(result.store).toBe(false)
  })
})
|
|
|
|
// Verifies that ProviderTransform.options only adds `thinkingConfig`
// ({ includeThoughts: true }) for Google/Vertex models whose capability
// flag `reasoning` is true; non-reasoning models get no thinkingConfig.
describe("ProviderTransform.options - google thinkingConfig gating", () => {
  const sessionID = "test-session-123"

  // Builds a Gemini fixture for either the AI Studio ("@ai-sdk/google") or
  // Vertex ("@ai-sdk/google-vertex") SDK, with a configurable reasoning flag.
  const createGoogleModel = (reasoning: boolean, npm: "@ai-sdk/google" | "@ai-sdk/google-vertex") =>
    ({
      id: `${npm === "@ai-sdk/google" ? "google" : "google-vertex"}/gemini-2.0-flash`,
      providerID: npm === "@ai-sdk/google" ? "google" : "google-vertex",
      api: {
        id: "gemini-2.0-flash",
        url: npm === "@ai-sdk/google" ? "https://generativelanguage.googleapis.com" : "https://vertexai.googleapis.com",
        npm,
      },
      name: "Gemini 2.0 Flash",
      capabilities: {
        temperature: true,
        reasoning,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: true },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 1_000_000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
    }) as any

  test("does not set thinkingConfig for google models without reasoning capability", () => {
    const result = ProviderTransform.options({
      model: createGoogleModel(false, "@ai-sdk/google"),
      sessionID,
      providerOptions: {},
    })
    expect(result.thinkingConfig).toBeUndefined()
  })

  test("sets thinkingConfig for google models with reasoning capability", () => {
    const result = ProviderTransform.options({
      model: createGoogleModel(true, "@ai-sdk/google"),
      sessionID,
      providerOptions: {},
    })
    expect(result.thinkingConfig).toEqual({
      includeThoughts: true,
    })
  })

  test("does not set thinkingConfig for vertex models without reasoning capability", () => {
    const result = ProviderTransform.options({
      model: createGoogleModel(false, "@ai-sdk/google-vertex"),
      sessionID,
      providerOptions: {},
    })
    expect(result.thinkingConfig).toBeUndefined()
  })
})
|
|
|
|
// Verifies textVerbosity defaulting for openai gpt-5.x models:
// plain gpt-5.1 / gpt-5.2 get textVerbosity "low", while "-chat",
// "-chat-latest", and "-codex" variants get no textVerbosity at all.
describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  const sessionID = "test-session-123"

  // Builds an openai model fixture keyed only by the API model id, which is
  // the sole discriminator for the verbosity behavior under test.
  const createGpt5Model = (apiId: string) =>
    ({
      id: `openai/${apiId}`,
      providerID: "openai",
      api: {
        id: apiId,
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      name: apiId,
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
      limit: { context: 128000, output: 4096 },
      status: "active",
      options: {},
      headers: {},
    }) as any

  test("gpt-5.2 should have textVerbosity set to low", () => {
    const model = createGpt5Model("gpt-5.2")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBe("low")
  })

  test("gpt-5.1 should have textVerbosity set to low", () => {
    const model = createGpt5Model("gpt-5.1")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBe("low")
  })

  test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
    const model = createGpt5Model("gpt-5.2-chat-latest")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })

  test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
    const model = createGpt5Model("gpt-5.1-chat-latest")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })

  test("gpt-5.2-chat should NOT have textVerbosity set", () => {
    const model = createGpt5Model("gpt-5.2-chat")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })

  test("gpt-5-chat should NOT have textVerbosity set", () => {
    const model = createGpt5Model("gpt-5-chat")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })

  test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
    const model = createGpt5Model("gpt-5.2-codex")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })
})
|
|
|
|
describe("ProviderTransform.options - gateway", () => {
|
|
const sessionID = "test-session-123"
|
|
|
|
const createModel = (id: string) =>
|
|
({
|
|
id,
|
|
providerID: "vercel",
|
|
api: {
|
|
id,
|
|
url: "https://ai-gateway.vercel.sh/v3/ai",
|
|
npm: "@ai-sdk/gateway",
|
|
},
|
|
name: id,
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: true,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: true },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: {
|
|
input: 0.001,
|
|
output: 0.002,
|
|
cache: { read: 0.0001, write: 0.0002 },
|
|
},
|
|
limit: {
|
|
context: 200_000,
|
|
output: 8192,
|
|
},
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
release_date: "2024-01-01",
|
|
}) as any
|
|
|
|
test("puts gateway defaults under gateway key", () => {
|
|
const model = createModel("anthropic/claude-sonnet-4")
|
|
const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
|
|
expect(result).toEqual({
|
|
gateway: {
|
|
caching: "auto",
|
|
},
|
|
})
|
|
})
|
|
})
|
|
|
|
// Verifies the namespace key under which ProviderTransform.providerOptions
// nests provider-specific options:
// - non-gateway models: a key derived from api.npm (e.g. bedrock for
//   "@ai-sdk/amazon-bedrock");
// - gateway models ("@ai-sdk/gateway"): the provider slug from a scoped
//   api id ("anthropic/...", "groq/...", with "amazon" mapped to "bedrock"),
//   falling back to "gateway" when neither api id nor model id is scoped;
// - an explicit `gateway` routing sub-object passes through untouched while
//   the remaining options are nested under the provider slug.
describe("ProviderTransform.providerOptions", () => {
  // Base fixture; individual tests override providerID/api (and sometimes id)
  // to select the code path under test.
  const createModel = (overrides: Partial<any> = {}) =>
    ({
      id: "test/test-model",
      providerID: "test",
      api: {
        id: "test-model",
        url: "https://api.test.com",
        npm: "@ai-sdk/openai",
      },
      name: "Test Model",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 200_000,
        output: 64_000,
      },
      status: "active",
      options: {},
      headers: {},
      release_date: "2024-01-01",
      ...overrides,
    }) as any

  test("uses sdk key for non-gateway models", () => {
    const model = createModel({
      providerID: "my-bedrock",
      api: {
        id: "anthropic.claude-sonnet-4",
        url: "https://bedrock.aws",
        npm: "@ai-sdk/amazon-bedrock",
      },
    })

    expect(ProviderTransform.providerOptions(model, { cachePoint: { type: "default" } })).toEqual({
      bedrock: { cachePoint: { type: "default" } },
    })
  })

  test("uses gateway model provider slug for gateway models", () => {
    const model = createModel({
      providerID: "vercel",
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
      anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
    })
  })

  // api.id "claude-sonnet-4" has no "provider/" prefix, but model.id does;
  // the implementation still falls back to the generic gateway key here.
  test("falls back to gateway key when gateway api id is unscoped", () => {
    const model = createModel({
      id: "anthropic/claude-sonnet-4",
      providerID: "vercel",
      api: {
        id: "claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
      gateway: { thinking: { type: "enabled", budgetTokens: 12_000 } },
    })
  })

  test("splits gateway routing options from provider-specific options", () => {
    const model = createModel({
      providerID: "vercel",
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(
      ProviderTransform.providerOptions(model, {
        gateway: { order: ["vertex", "anthropic"] },
        thinking: { type: "enabled", budgetTokens: 12_000 },
      }),
    ).toEqual({
      gateway: { order: ["vertex", "anthropic"] },
      anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
    } as any)
  })

  test("falls back to gateway key when model id has no provider slug", () => {
    const model = createModel({
      id: "claude-sonnet-4",
      providerID: "vercel",
      api: {
        id: "claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(ProviderTransform.providerOptions(model, { reasoningEffort: "high" })).toEqual({
      gateway: { reasoningEffort: "high" },
    })
  })

  test("maps amazon slug to bedrock for provider options", () => {
    const model = createModel({
      providerID: "vercel",
      api: {
        id: "amazon/nova-2-lite",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(ProviderTransform.providerOptions(model, { reasoningConfig: { type: "enabled" } })).toEqual({
      bedrock: { reasoningConfig: { type: "enabled" } },
    })
  })

  test("uses groq slug for groq models", () => {
    const model = createModel({
      providerID: "vercel",
      api: {
        id: "groq/llama-3.3-70b-versatile",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
    })

    expect(ProviderTransform.providerOptions(model, { reasoningFormat: "parsed" })).toEqual({
      groq: { reasoningFormat: "parsed" },
    })
  })
})
|
|
|
|
describe("ProviderTransform.schema - gemini array items", () => {
|
|
test("adds missing items for array properties", () => {
|
|
const geminiModel = {
|
|
providerID: "google",
|
|
api: {
|
|
id: "gemini-3-pro",
|
|
},
|
|
} as any
|
|
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
nodes: { type: "array" },
|
|
edges: { type: "array", items: { type: "string" } },
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.nodes.items).toBeDefined()
|
|
expect(result.properties.edges.items.type).toBe("string")
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.schema - gemini nested array items", () => {
|
|
const geminiModel = {
|
|
providerID: "google",
|
|
api: {
|
|
id: "gemini-3-pro",
|
|
},
|
|
} as any
|
|
|
|
test("adds type to 2D array with empty inner items", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
values: {
|
|
type: "array",
|
|
items: {
|
|
type: "array",
|
|
items: {}, // Empty items object
|
|
},
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
// Inner items should have a default type
|
|
expect(result.properties.values.items.items.type).toBe("string")
|
|
})
|
|
|
|
test("adds items and type to 2D array with missing inner items", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
data: {
|
|
type: "array",
|
|
items: { type: "array" }, // No items at all
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.data.items.items).toBeDefined()
|
|
expect(result.properties.data.items.items.type).toBe("string")
|
|
})
|
|
|
|
test("handles deeply nested arrays (3D)", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
matrix: {
|
|
type: "array",
|
|
items: {
|
|
type: "array",
|
|
items: {
|
|
type: "array",
|
|
// No items
|
|
},
|
|
},
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.matrix.items.items.items).toBeDefined()
|
|
expect(result.properties.matrix.items.items.items.type).toBe("string")
|
|
})
|
|
|
|
test("preserves existing item types in nested arrays", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
numbers: {
|
|
type: "array",
|
|
items: {
|
|
type: "array",
|
|
items: { type: "number" }, // Has explicit type
|
|
},
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
// Should preserve the explicit type
|
|
expect(result.properties.numbers.items.items.type).toBe("number")
|
|
})
|
|
|
|
test("handles mixed nested structures with objects and arrays", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
spreadsheetData: {
|
|
type: "object",
|
|
properties: {
|
|
rows: {
|
|
type: "array",
|
|
items: {
|
|
type: "array",
|
|
items: {}, // Empty items
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.spreadsheetData.properties.rows.items.items.type).toBe("string")
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.schema - gemini combiner nodes", () => {
|
|
const geminiModel = {
|
|
providerID: "google",
|
|
api: {
|
|
id: "gemini-3-pro",
|
|
},
|
|
} as any
|
|
|
|
const walk = (node: any, cb: (node: any, path: (string | number)[]) => void, path: (string | number)[] = []) => {
|
|
if (node === null || typeof node !== "object") {
|
|
return
|
|
}
|
|
if (Array.isArray(node)) {
|
|
node.forEach((item, i) => walk(item, cb, [...path, i]))
|
|
return
|
|
}
|
|
cb(node, path)
|
|
Object.entries(node).forEach(([key, value]) => walk(value, cb, [...path, key]))
|
|
}
|
|
|
|
test("keeps edits.items.anyOf without adding type", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
edits: {
|
|
type: "array",
|
|
items: {
|
|
anyOf: [
|
|
{
|
|
type: "object",
|
|
properties: {
|
|
old_string: { type: "string" },
|
|
new_string: { type: "string" },
|
|
},
|
|
required: ["old_string", "new_string"],
|
|
},
|
|
{
|
|
type: "object",
|
|
properties: {
|
|
old_string: { type: "string" },
|
|
new_string: { type: "string" },
|
|
replace_all: { type: "boolean" },
|
|
},
|
|
required: ["old_string", "new_string"],
|
|
},
|
|
],
|
|
},
|
|
},
|
|
},
|
|
required: ["edits"],
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(Array.isArray(result.properties.edits.items.anyOf)).toBe(true)
|
|
expect(result.properties.edits.items.type).toBeUndefined()
|
|
})
|
|
|
|
test("does not add sibling keys to combiner nodes during sanitize", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
edits: {
|
|
type: "array",
|
|
items: {
|
|
anyOf: [{ type: "string" }, { type: "number" }],
|
|
},
|
|
},
|
|
value: {
|
|
oneOf: [{ type: "string" }, { type: "boolean" }],
|
|
},
|
|
meta: {
|
|
allOf: [
|
|
{
|
|
type: "object",
|
|
properties: { a: { type: "string" } },
|
|
},
|
|
{
|
|
type: "object",
|
|
properties: { b: { type: "string" } },
|
|
},
|
|
],
|
|
},
|
|
},
|
|
} as any
|
|
const input = JSON.parse(JSON.stringify(schema))
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
walk(result, (node, path) => {
|
|
const hasCombiner = Array.isArray(node.anyOf) || Array.isArray(node.oneOf) || Array.isArray(node.allOf)
|
|
if (!hasCombiner) {
|
|
return
|
|
}
|
|
const before = path.reduce((acc: any, key) => acc?.[key], input)
|
|
const added = Object.keys(node).filter((key) => !(key in before))
|
|
expect(added).toEqual([])
|
|
})
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.schema - gemini non-object properties removal", () => {
|
|
const geminiModel = {
|
|
providerID: "google",
|
|
api: {
|
|
id: "gemini-3-pro",
|
|
},
|
|
} as any
|
|
|
|
test("removes properties from non-object types", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
data: {
|
|
type: "string",
|
|
properties: { invalid: { type: "string" } },
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.data.type).toBe("string")
|
|
expect(result.properties.data.properties).toBeUndefined()
|
|
})
|
|
|
|
test("removes required from non-object types", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
data: {
|
|
type: "array",
|
|
items: { type: "string" },
|
|
required: ["invalid"],
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.data.type).toBe("array")
|
|
expect(result.properties.data.required).toBeUndefined()
|
|
})
|
|
|
|
test("removes properties and required from nested non-object types", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
outer: {
|
|
type: "object",
|
|
properties: {
|
|
inner: {
|
|
type: "number",
|
|
properties: { bad: { type: "string" } },
|
|
required: ["bad"],
|
|
},
|
|
},
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.outer.properties.inner.type).toBe("number")
|
|
expect(result.properties.outer.properties.inner.properties).toBeUndefined()
|
|
expect(result.properties.outer.properties.inner.required).toBeUndefined()
|
|
})
|
|
|
|
test("keeps properties and required on object types", () => {
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
data: {
|
|
type: "object",
|
|
properties: { name: { type: "string" } },
|
|
required: ["name"],
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(geminiModel, schema) as any
|
|
|
|
expect(result.properties.data.type).toBe("object")
|
|
expect(result.properties.data.properties).toBeDefined()
|
|
expect(result.properties.data.required).toEqual(["name"])
|
|
})
|
|
|
|
test("does not affect non-gemini providers", () => {
|
|
const openaiModel = {
|
|
providerID: "openai",
|
|
api: {
|
|
id: "gpt-4",
|
|
},
|
|
} as any
|
|
|
|
const schema = {
|
|
type: "object",
|
|
properties: {
|
|
data: {
|
|
type: "string",
|
|
properties: { invalid: { type: "string" } },
|
|
},
|
|
},
|
|
} as any
|
|
|
|
const result = ProviderTransform.schema(openaiModel, schema) as any
|
|
|
|
expect(result.properties.data.properties).toBeDefined()
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
|
|
test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "reasoning", text: "Let me think about this..." },
|
|
{
|
|
type: "tool-call",
|
|
toolCallId: "test",
|
|
toolName: "bash",
|
|
input: { command: "echo hello" },
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(
|
|
msgs,
|
|
{
|
|
id: ModelID.make("deepseek/deepseek-chat"),
|
|
providerID: ProviderID.make("deepseek"),
|
|
api: {
|
|
id: "deepseek-chat",
|
|
url: "https://api.deepseek.com",
|
|
npm: "@ai-sdk/openai-compatible",
|
|
},
|
|
name: "DeepSeek Chat",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: true,
|
|
attachment: false,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: {
|
|
field: "reasoning_content",
|
|
},
|
|
},
|
|
cost: {
|
|
input: 0.001,
|
|
output: 0.002,
|
|
cache: { read: 0.0001, write: 0.0002 },
|
|
},
|
|
limit: {
|
|
context: 128000,
|
|
output: 8192,
|
|
},
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
release_date: "2023-04-01",
|
|
},
|
|
{},
|
|
)
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toEqual([
|
|
{
|
|
type: "tool-call",
|
|
toolCallId: "test",
|
|
toolName: "bash",
|
|
input: { command: "echo hello" },
|
|
},
|
|
])
|
|
expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
|
|
})
|
|
|
|
test("Non-DeepSeek providers leave reasoning content unchanged", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "reasoning", text: "Should not be processed" },
|
|
{ type: "text", text: "Answer" },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(
|
|
msgs,
|
|
{
|
|
id: ModelID.make("openai/gpt-4"),
|
|
providerID: ProviderID.make("openai"),
|
|
api: {
|
|
id: "gpt-4",
|
|
url: "https://api.openai.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
name: "GPT-4",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: false,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: false },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: {
|
|
input: 0.03,
|
|
output: 0.06,
|
|
cache: { read: 0.001, write: 0.002 },
|
|
},
|
|
limit: {
|
|
context: 128000,
|
|
output: 4096,
|
|
},
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
release_date: "2023-04-01",
|
|
},
|
|
{},
|
|
)
|
|
|
|
expect(result[0].content).toEqual([
|
|
{ type: "reasoning", text: "Should not be processed" },
|
|
{ type: "text", text: "Answer" },
|
|
])
|
|
expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.message - empty image handling", () => {
|
|
const mockModel = {
|
|
id: "anthropic/claude-3-5-sonnet",
|
|
providerID: "anthropic",
|
|
api: {
|
|
id: "claude-3-5-sonnet-20241022",
|
|
url: "https://api.anthropic.com",
|
|
npm: "@ai-sdk/anthropic",
|
|
},
|
|
name: "Claude 3.5 Sonnet",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: false,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: true },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: {
|
|
input: 0.003,
|
|
output: 0.015,
|
|
cache: { read: 0.0003, write: 0.00375 },
|
|
},
|
|
limit: {
|
|
context: 200000,
|
|
output: 8192,
|
|
},
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
} as any
|
|
|
|
test("should replace empty base64 image with error text", () => {
|
|
const msgs = [
|
|
{
|
|
role: "user",
|
|
content: [
|
|
{ type: "text", text: "What is in this image?" },
|
|
{ type: "image", image: "data:image/png;base64," },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, mockModel, {})
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toHaveLength(2)
|
|
expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
|
|
expect(result[0].content[1]).toEqual({
|
|
type: "text",
|
|
text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
|
|
})
|
|
})
|
|
|
|
test("should keep valid base64 images unchanged", () => {
|
|
const validBase64 =
|
|
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
|
|
const msgs = [
|
|
{
|
|
role: "user",
|
|
content: [
|
|
{ type: "text", text: "What is in this image?" },
|
|
{ type: "image", image: `data:image/png;base64,${validBase64}` },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, mockModel, {})
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toHaveLength(2)
|
|
expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
|
|
expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
|
|
})
|
|
|
|
test("should handle mixed valid and empty images", () => {
|
|
const validBase64 =
|
|
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
|
|
const msgs = [
|
|
{
|
|
role: "user",
|
|
content: [
|
|
{ type: "text", text: "Compare these images" },
|
|
{ type: "image", image: `data:image/png;base64,${validBase64}` },
|
|
{ type: "image", image: "data:image/jpeg;base64," },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, mockModel, {})
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toHaveLength(3)
|
|
expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
|
|
expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
|
|
expect(result[0].content[2]).toEqual({
|
|
type: "text",
|
|
text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
|
|
})
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.message - anthropic empty content filtering", () => {
|
|
const anthropicModel = {
|
|
id: "anthropic/claude-3-5-sonnet",
|
|
providerID: "anthropic",
|
|
api: {
|
|
id: "claude-3-5-sonnet-20241022",
|
|
url: "https://api.anthropic.com",
|
|
npm: "@ai-sdk/anthropic",
|
|
},
|
|
name: "Claude 3.5 Sonnet",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: false,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: true },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: {
|
|
input: 0.003,
|
|
output: 0.015,
|
|
cache: { read: 0.0003, write: 0.00375 },
|
|
},
|
|
limit: {
|
|
context: 200000,
|
|
output: 8192,
|
|
},
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
} as any
|
|
|
|
test("filters out messages with empty string content", () => {
|
|
const msgs = [
|
|
{ role: "user", content: "Hello" },
|
|
{ role: "assistant", content: "" },
|
|
{ role: "user", content: "World" },
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, anthropicModel, {})
|
|
|
|
expect(result).toHaveLength(2)
|
|
expect(result[0].content).toBe("Hello")
|
|
expect(result[1].content).toBe("World")
|
|
})
|
|
|
|
test("filters out empty text parts from array content", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "text", text: "" },
|
|
{ type: "text", text: "Hello" },
|
|
{ type: "text", text: "" },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, anthropicModel, {})
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toHaveLength(1)
|
|
expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
|
|
})
|
|
|
|
test("filters out empty reasoning parts from array content", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "reasoning", text: "" },
|
|
{ type: "text", text: "Answer" },
|
|
{ type: "reasoning", text: "" },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, anthropicModel, {})
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toHaveLength(1)
|
|
expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
|
|
})
|
|
|
|
test("removes entire message when all parts are empty", () => {
|
|
const msgs = [
|
|
{ role: "user", content: "Hello" },
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "text", text: "" },
|
|
{ type: "reasoning", text: "" },
|
|
],
|
|
},
|
|
{ role: "user", content: "World" },
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, anthropicModel, {})
|
|
|
|
expect(result).toHaveLength(2)
|
|
expect(result[0].content).toBe("Hello")
|
|
expect(result[1].content).toBe("World")
|
|
})
|
|
|
|
test("keeps non-text/reasoning parts even if text parts are empty", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "text", text: "" },
|
|
{ type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, anthropicModel, {})
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toHaveLength(1)
|
|
expect(result[0].content[0]).toEqual({
|
|
type: "tool-call",
|
|
toolCallId: "123",
|
|
toolName: "bash",
|
|
input: { command: "ls" },
|
|
})
|
|
})
|
|
|
|
test("keeps messages with valid text alongside empty parts", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "reasoning", text: "Thinking..." },
|
|
{ type: "text", text: "" },
|
|
{ type: "text", text: "Result" },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, anthropicModel, {})
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content).toHaveLength(2)
|
|
expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
|
|
expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
|
|
})
|
|
|
|
test("filters empty content for bedrock provider", () => {
|
|
const bedrockModel = {
|
|
...anthropicModel,
|
|
id: "amazon-bedrock/anthropic.claude-opus-4-6",
|
|
providerID: "amazon-bedrock",
|
|
api: {
|
|
id: "anthropic.claude-opus-4-6",
|
|
url: "https://bedrock-runtime.us-east-1.amazonaws.com",
|
|
npm: "@ai-sdk/amazon-bedrock",
|
|
},
|
|
}
|
|
|
|
const msgs = [
|
|
{ role: "user", content: "Hello" },
|
|
{ role: "assistant", content: "" },
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{ type: "text", text: "" },
|
|
{ type: "text", text: "Answer" },
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, bedrockModel, {})
|
|
|
|
expect(result).toHaveLength(2)
|
|
expect(result[0].content).toBe("Hello")
|
|
expect(result[1].content).toHaveLength(1)
|
|
expect(result[1].content[0]).toEqual({ type: "text", text: "Answer" })
|
|
})
|
|
|
|
test("does not filter for non-anthropic providers", () => {
|
|
const openaiModel = {
|
|
...anthropicModel,
|
|
providerID: "openai",
|
|
api: {
|
|
id: "gpt-4",
|
|
url: "https://api.openai.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
}
|
|
|
|
const msgs = [
|
|
{ role: "assistant", content: "" },
|
|
{
|
|
role: "assistant",
|
|
content: [{ type: "text", text: "" }],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, openaiModel, {})
|
|
|
|
expect(result).toHaveLength(2)
|
|
expect(result[0].content).toBe("")
|
|
expect(result[1].content).toHaveLength(1)
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.message - strip openai metadata when store=false", () => {
|
|
const openaiModel = {
|
|
id: "openai/gpt-5",
|
|
providerID: "openai",
|
|
api: {
|
|
id: "gpt-5",
|
|
url: "https://api.openai.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
name: "GPT-5",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: true,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: false },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
|
|
limit: { context: 128000, output: 4096 },
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
} as any
|
|
|
|
test("preserves itemId and reasoningEncryptedContent when store=false", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{
|
|
type: "reasoning",
|
|
text: "thinking...",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "rs_123",
|
|
reasoningEncryptedContent: "encrypted",
|
|
},
|
|
},
|
|
},
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "msg_456",
|
|
},
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
|
|
expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
|
|
})
|
|
|
|
test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
|
|
const zenModel = {
|
|
...openaiModel,
|
|
providerID: "zen",
|
|
}
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{
|
|
type: "reasoning",
|
|
text: "thinking...",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "rs_123",
|
|
reasoningEncryptedContent: "encrypted",
|
|
},
|
|
},
|
|
},
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "msg_456",
|
|
},
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
|
|
|
|
expect(result).toHaveLength(1)
|
|
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
|
|
expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
|
|
})
|
|
|
|
test("preserves other openai options including itemId", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "msg_123",
|
|
otherOption: "value",
|
|
},
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
|
|
|
|
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
|
|
expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
|
|
})
|
|
|
|
test("preserves metadata for openai package when store is true", () => {
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "msg_123",
|
|
},
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
// openai package preserves itemId regardless of store value
|
|
const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
|
|
|
|
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
|
|
})
|
|
|
|
test("preserves metadata for non-openai packages when store is false", () => {
|
|
const anthropicModel = {
|
|
...openaiModel,
|
|
providerID: "anthropic",
|
|
api: {
|
|
id: "claude-3",
|
|
url: "https://api.anthropic.com",
|
|
npm: "@ai-sdk/anthropic",
|
|
},
|
|
}
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "msg_123",
|
|
},
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
// store=false preserves metadata for non-openai packages
|
|
const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
|
|
|
|
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
|
|
})
|
|
|
|
test("preserves metadata using providerID key when store is false", () => {
|
|
const opencodeModel = {
|
|
...openaiModel,
|
|
providerID: "opencode",
|
|
api: {
|
|
id: "opencode-test",
|
|
url: "https://api.opencode.ai",
|
|
npm: "@ai-sdk/openai-compatible",
|
|
},
|
|
}
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
opencode: {
|
|
itemId: "msg_123",
|
|
otherOption: "value",
|
|
},
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
|
|
|
|
expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
|
|
expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
|
|
})
|
|
|
|
test("preserves itemId across all providerOptions keys", () => {
|
|
const opencodeModel = {
|
|
...openaiModel,
|
|
providerID: "opencode",
|
|
api: {
|
|
id: "opencode-test",
|
|
url: "https://api.opencode.ai",
|
|
npm: "@ai-sdk/openai-compatible",
|
|
},
|
|
}
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
providerOptions: {
|
|
openai: { itemId: "msg_root" },
|
|
opencode: { itemId: "msg_opencode" },
|
|
extra: { itemId: "msg_extra" },
|
|
},
|
|
content: [
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
openai: { itemId: "msg_openai_part" },
|
|
opencode: { itemId: "msg_opencode_part" },
|
|
extra: { itemId: "msg_extra_part" },
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
|
|
|
|
expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
|
|
expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
|
|
expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
|
|
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
|
|
expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
|
|
expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
|
|
})
|
|
|
|
test("does not strip metadata for non-openai packages when store is not false", () => {
|
|
const anthropicModel = {
|
|
...openaiModel,
|
|
providerID: "anthropic",
|
|
api: {
|
|
id: "claude-3",
|
|
url: "https://api.anthropic.com",
|
|
npm: "@ai-sdk/anthropic",
|
|
},
|
|
}
|
|
const msgs = [
|
|
{
|
|
role: "assistant",
|
|
content: [
|
|
{
|
|
type: "text",
|
|
text: "Hello",
|
|
providerOptions: {
|
|
openai: {
|
|
itemId: "msg_123",
|
|
},
|
|
},
|
|
},
|
|
],
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
|
|
|
|
expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.message - providerOptions key remapping", () => {
|
|
const createModel = (providerID: string, npm: string) =>
|
|
({
|
|
id: `${providerID}/test-model`,
|
|
providerID,
|
|
api: {
|
|
id: "test-model",
|
|
url: "https://api.test.com",
|
|
npm,
|
|
},
|
|
name: "Test Model",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: false,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: true },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
|
|
limit: { context: 128000, output: 8192 },
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
}) as any
|
|
|
|
test("azure keeps 'azure' key and does not remap to 'openai'", () => {
|
|
const model = createModel("azure", "@ai-sdk/azure")
|
|
const msgs = [
|
|
{
|
|
role: "user",
|
|
content: "Hello",
|
|
providerOptions: {
|
|
azure: { someOption: "value" },
|
|
},
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, model, {})
|
|
|
|
expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
|
|
expect(result[0].providerOptions?.openai).toBeUndefined()
|
|
})
|
|
|
|
test("copilot remaps providerID to 'copilot' key", () => {
|
|
const model = createModel("github-copilot", "@ai-sdk/github-copilot")
|
|
const msgs = [
|
|
{
|
|
role: "user",
|
|
content: "Hello",
|
|
providerOptions: {
|
|
copilot: { someOption: "value" },
|
|
},
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, model, {})
|
|
|
|
expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
|
|
expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
|
|
})
|
|
|
|
test("bedrock remaps providerID to 'bedrock' key", () => {
|
|
const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
|
|
const msgs = [
|
|
{
|
|
role: "user",
|
|
content: "Hello",
|
|
providerOptions: {
|
|
"my-bedrock": { someOption: "value" },
|
|
},
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, model, {})
|
|
|
|
expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
|
|
expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
|
|
test("adds cachePoint", () => {
|
|
const model = {
|
|
id: "amazon-bedrock/custom-claude-sonnet-4.5",
|
|
providerID: "amazon-bedrock",
|
|
api: {
|
|
id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
|
|
url: "https://api.test.com",
|
|
npm: "@ai-sdk/amazon-bedrock",
|
|
},
|
|
name: "Custom inference profile",
|
|
capabilities: {},
|
|
options: {},
|
|
headers: {},
|
|
} as any
|
|
|
|
const msgs = [
|
|
{
|
|
role: "user",
|
|
content: "Hello",
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, model, {})
|
|
|
|
expect(result[0].providerOptions?.bedrock).toEqual(
|
|
expect.objectContaining({
|
|
cachePoint: {
|
|
type: "default",
|
|
},
|
|
}),
|
|
)
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.message - cache control on gateway", () => {
|
|
const createModel = (overrides: Partial<any> = {}) =>
|
|
({
|
|
id: "anthropic/claude-sonnet-4",
|
|
providerID: "vercel",
|
|
api: {
|
|
id: "anthropic/claude-sonnet-4",
|
|
url: "https://ai-gateway.vercel.sh/v3/ai",
|
|
npm: "@ai-sdk/gateway",
|
|
},
|
|
name: "Claude Sonnet 4",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: true,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: true },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
|
|
limit: { context: 200_000, output: 8192 },
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
...overrides,
|
|
}) as any
|
|
|
|
test("gateway does not set cache control for anthropic models", () => {
|
|
const model = createModel()
|
|
const msgs = [
|
|
{
|
|
role: "system",
|
|
content: [{ type: "text", text: "You are a helpful assistant" }],
|
|
},
|
|
{
|
|
role: "user",
|
|
content: "Hello",
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, model, {}) as any[]
|
|
|
|
expect(result[0].content[0].providerOptions).toBeUndefined()
|
|
expect(result[0].providerOptions).toBeUndefined()
|
|
})
|
|
|
|
test("non-gateway anthropic keeps existing cache control behavior", () => {
|
|
const model = createModel({
|
|
providerID: "anthropic",
|
|
api: {
|
|
id: "claude-sonnet-4",
|
|
url: "https://api.anthropic.com",
|
|
npm: "@ai-sdk/anthropic",
|
|
},
|
|
})
|
|
const msgs = [
|
|
{
|
|
role: "system",
|
|
content: "You are a helpful assistant",
|
|
},
|
|
{
|
|
role: "user",
|
|
content: "Hello",
|
|
},
|
|
] as any[]
|
|
|
|
const result = ProviderTransform.message(msgs, model, {}) as any[]
|
|
|
|
expect(result[0].providerOptions).toEqual({
|
|
anthropic: {
|
|
cacheControl: {
|
|
type: "ephemeral",
|
|
},
|
|
},
|
|
openrouter: {
|
|
cacheControl: {
|
|
type: "ephemeral",
|
|
},
|
|
},
|
|
bedrock: {
|
|
cachePoint: {
|
|
type: "default",
|
|
},
|
|
},
|
|
openaiCompatible: {
|
|
cache_control: {
|
|
type: "ephemeral",
|
|
},
|
|
},
|
|
copilot: {
|
|
copilot_cache_control: {
|
|
type: "ephemeral",
|
|
},
|
|
},
|
|
})
|
|
})
|
|
})
|
|
|
|
describe("ProviderTransform.variants", () => {
|
|
const createMockModel = (overrides: Partial<any> = {}): any => ({
|
|
id: "test/test-model",
|
|
providerID: "test",
|
|
api: {
|
|
id: "test-model",
|
|
url: "https://api.test.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
name: "Test Model",
|
|
capabilities: {
|
|
temperature: true,
|
|
reasoning: true,
|
|
attachment: true,
|
|
toolcall: true,
|
|
input: { text: true, audio: false, image: true, video: false, pdf: false },
|
|
output: { text: true, audio: false, image: false, video: false, pdf: false },
|
|
interleaved: false,
|
|
},
|
|
cost: {
|
|
input: 0.001,
|
|
output: 0.002,
|
|
cache: { read: 0.0001, write: 0.0002 },
|
|
},
|
|
limit: {
|
|
context: 200_000,
|
|
output: 64_000,
|
|
},
|
|
status: "active",
|
|
options: {},
|
|
headers: {},
|
|
release_date: "2024-01-01",
|
|
...overrides,
|
|
})
|
|
|
|
test("returns empty object when model has no reasoning capabilities", () => {
|
|
const model = createMockModel({
|
|
capabilities: { reasoning: false },
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("deepseek returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "deepseek/deepseek-chat",
|
|
providerID: "deepseek",
|
|
api: {
|
|
id: "deepseek-chat",
|
|
url: "https://api.deepseek.com",
|
|
npm: "@ai-sdk/openai-compatible",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("minimax returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "minimax/minimax-model",
|
|
providerID: "minimax",
|
|
api: {
|
|
id: "minimax-model",
|
|
url: "https://api.minimax.com",
|
|
npm: "@ai-sdk/openai-compatible",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("glm returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "glm/glm-4",
|
|
providerID: "glm",
|
|
api: {
|
|
id: "glm-4",
|
|
url: "https://api.glm.com",
|
|
npm: "@ai-sdk/openai-compatible",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("mistral returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "mistral/mistral-large",
|
|
providerID: "mistral",
|
|
api: {
|
|
id: "mistral-large-latest",
|
|
url: "https://api.mistral.com",
|
|
npm: "@ai-sdk/mistral",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
describe("@openrouter/ai-sdk-provider", () => {
|
|
test("returns empty object for non-qualifying models", () => {
|
|
const model = createMockModel({
|
|
id: "openrouter/test-model",
|
|
providerID: "openrouter",
|
|
api: {
|
|
id: "test-model",
|
|
url: "https://openrouter.ai",
|
|
npm: "@openrouter/ai-sdk-provider",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("gpt models return OPENAI_EFFORTS with reasoning", () => {
|
|
const model = createMockModel({
|
|
id: "openrouter/gpt-4",
|
|
providerID: "openrouter",
|
|
api: {
|
|
id: "gpt-4",
|
|
url: "https://openrouter.ai",
|
|
npm: "@openrouter/ai-sdk-provider",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
|
|
expect(result.low).toEqual({ reasoning: { effort: "low" } })
|
|
expect(result.high).toEqual({ reasoning: { effort: "high" } })
|
|
})
|
|
|
|
test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
|
|
const model = createMockModel({
|
|
id: "openrouter/gemini-3-5-pro",
|
|
providerID: "openrouter",
|
|
api: {
|
|
id: "gemini-3-5-pro",
|
|
url: "https://openrouter.ai",
|
|
npm: "@openrouter/ai-sdk-provider",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
|
|
})
|
|
|
|
test("grok-4 returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "openrouter/grok-4",
|
|
providerID: "openrouter",
|
|
api: {
|
|
id: "grok-4",
|
|
url: "https://openrouter.ai",
|
|
npm: "@openrouter/ai-sdk-provider",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("grok-3-mini returns low and high with reasoning", () => {
|
|
const model = createMockModel({
|
|
id: "openrouter/grok-3-mini",
|
|
providerID: "openrouter",
|
|
api: {
|
|
id: "grok-3-mini",
|
|
url: "https://openrouter.ai",
|
|
npm: "@openrouter/ai-sdk-provider",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "high"])
|
|
expect(result.low).toEqual({ reasoning: { effort: "low" } })
|
|
expect(result.high).toEqual({ reasoning: { effort: "high" } })
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/gateway", () => {
|
|
test("anthropic sonnet 4.6 models return adaptive thinking options", () => {
|
|
const model = createMockModel({
|
|
id: "anthropic/claude-sonnet-4-6",
|
|
providerID: "gateway",
|
|
api: {
|
|
id: "anthropic/claude-sonnet-4-6",
|
|
url: "https://gateway.ai",
|
|
npm: "@ai-sdk/gateway",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
|
|
expect(result.medium).toEqual({
|
|
thinking: {
|
|
type: "adaptive",
|
|
},
|
|
effort: "medium",
|
|
})
|
|
})
|
|
|
|
test("anthropic sonnet 4.6 dot-format models return adaptive thinking options", () => {
|
|
const model = createMockModel({
|
|
id: "anthropic/claude-sonnet-4-6",
|
|
providerID: "gateway",
|
|
api: {
|
|
id: "anthropic/claude-sonnet-4.6",
|
|
url: "https://gateway.ai",
|
|
npm: "@ai-sdk/gateway",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
|
|
expect(result.medium).toEqual({
|
|
thinking: {
|
|
type: "adaptive",
|
|
},
|
|
effort: "medium",
|
|
})
|
|
})
|
|
|
|
test("anthropic opus 4.6 dot-format models return adaptive thinking options", () => {
|
|
const model = createMockModel({
|
|
id: "anthropic/claude-opus-4-6",
|
|
providerID: "gateway",
|
|
api: {
|
|
id: "anthropic/claude-opus-4.6",
|
|
url: "https://gateway.ai",
|
|
npm: "@ai-sdk/gateway",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
|
|
expect(result.high).toEqual({
|
|
thinking: {
|
|
type: "adaptive",
|
|
},
|
|
effort: "high",
|
|
})
|
|
})
|
|
|
|
test("anthropic models return anthropic thinking options", () => {
|
|
const model = createMockModel({
|
|
id: "anthropic/claude-sonnet-4",
|
|
providerID: "gateway",
|
|
api: {
|
|
id: "anthropic/claude-sonnet-4",
|
|
url: "https://gateway.ai",
|
|
npm: "@ai-sdk/gateway",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["high", "max"])
|
|
expect(result.high).toEqual({
|
|
thinking: {
|
|
type: "enabled",
|
|
budgetTokens: 16000,
|
|
},
|
|
})
|
|
expect(result.max).toEqual({
|
|
thinking: {
|
|
type: "enabled",
|
|
budgetTokens: 31999,
|
|
},
|
|
})
|
|
})
|
|
|
|
test("returns OPENAI_EFFORTS with reasoningEffort", () => {
|
|
const model = createMockModel({
|
|
id: "gateway/gateway-model",
|
|
providerID: "gateway",
|
|
api: {
|
|
id: "gateway-model",
|
|
url: "https://gateway.ai",
|
|
npm: "@ai-sdk/gateway",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/github-copilot", () => {
|
|
test("standard models return low, medium, high", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-4.5",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-4.5",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({
|
|
reasoningEffort: "low",
|
|
reasoningSummary: "auto",
|
|
include: ["reasoning.encrypted_content"],
|
|
})
|
|
})
|
|
|
|
test("gpt-5.1-codex-max includes xhigh", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5.1-codex-max",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-5.1-codex-max",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
|
|
})
|
|
|
|
test("gpt-5.1-codex-mini does not include xhigh", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5.1-codex-mini",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-5.1-codex-mini",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
})
|
|
|
|
test("gpt-5.1-codex does not include xhigh", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5.1-codex",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-5.1-codex",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
})
|
|
|
|
test("gpt-5.2 includes xhigh", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5.2",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-5.2",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
|
|
expect(result.xhigh).toEqual({
|
|
reasoningEffort: "xhigh",
|
|
reasoningSummary: "auto",
|
|
include: ["reasoning.encrypted_content"],
|
|
})
|
|
})
|
|
|
|
test("gpt-5.2-codex includes xhigh", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5.2-codex",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-5.2-codex",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
|
|
})
|
|
|
|
test("gpt-5.3-codex includes xhigh", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5.3-codex",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-5.3-codex",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
|
|
})
|
|
|
|
test("gpt-5.4 includes xhigh", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5.4",
|
|
release_date: "2026-03-05",
|
|
providerID: "github-copilot",
|
|
api: {
|
|
id: "gpt-5.4",
|
|
url: "https://api.githubcopilot.com",
|
|
npm: "@ai-sdk/github-copilot",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/cerebras", () => {
|
|
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
|
|
const model = createMockModel({
|
|
id: "cerebras/llama-4",
|
|
providerID: "cerebras",
|
|
api: {
|
|
id: "llama-4-sc",
|
|
url: "https://api.cerebras.ai",
|
|
npm: "@ai-sdk/cerebras",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/togetherai", () => {
|
|
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
|
|
const model = createMockModel({
|
|
id: "togetherai/llama-4",
|
|
providerID: "togetherai",
|
|
api: {
|
|
id: "llama-4-sc",
|
|
url: "https://api.togetherai.com",
|
|
npm: "@ai-sdk/togetherai",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/xai", () => {
|
|
test("grok-3 returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "xai/grok-3",
|
|
providerID: "xai",
|
|
api: {
|
|
id: "grok-3",
|
|
url: "https://api.x.ai",
|
|
npm: "@ai-sdk/xai",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("grok-3-mini returns low and high with reasoningEffort", () => {
|
|
const model = createMockModel({
|
|
id: "xai/grok-3-mini",
|
|
providerID: "xai",
|
|
api: {
|
|
id: "grok-3-mini",
|
|
url: "https://api.x.ai",
|
|
npm: "@ai-sdk/xai",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "high"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/deepinfra", () => {
|
|
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
|
|
const model = createMockModel({
|
|
id: "deepinfra/llama-4",
|
|
providerID: "deepinfra",
|
|
api: {
|
|
id: "llama-4-sc",
|
|
url: "https://api.deepinfra.com",
|
|
npm: "@ai-sdk/deepinfra",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/openai-compatible", () => {
|
|
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
|
|
const model = createMockModel({
|
|
id: "custom-provider/custom-model",
|
|
providerID: "custom-provider",
|
|
api: {
|
|
id: "custom-model",
|
|
url: "https://api.custom.com",
|
|
npm: "@ai-sdk/openai-compatible",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/azure", () => {
|
|
test("o1-mini returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "o1-mini",
|
|
providerID: "azure",
|
|
api: {
|
|
id: "o1-mini",
|
|
url: "https://azure.com",
|
|
npm: "@ai-sdk/azure",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("standard azure models return custom efforts with reasoningSummary", () => {
|
|
const model = createMockModel({
|
|
id: "o1",
|
|
providerID: "azure",
|
|
api: {
|
|
id: "o1",
|
|
url: "https://azure.com",
|
|
npm: "@ai-sdk/azure",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({
|
|
reasoningEffort: "low",
|
|
reasoningSummary: "auto",
|
|
include: ["reasoning.encrypted_content"],
|
|
})
|
|
})
|
|
|
|
test("gpt-5 adds minimal effort", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5",
|
|
providerID: "azure",
|
|
api: {
|
|
id: "gpt-5",
|
|
url: "https://azure.com",
|
|
npm: "@ai-sdk/azure",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/openai", () => {
|
|
test("gpt-5-pro returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5-pro",
|
|
providerID: "openai",
|
|
api: {
|
|
id: "gpt-5-pro",
|
|
url: "https://api.openai.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("standard openai models return custom efforts with reasoningSummary", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5",
|
|
providerID: "openai",
|
|
api: {
|
|
id: "gpt-5",
|
|
url: "https://api.openai.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
release_date: "2024-06-01",
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
|
|
expect(result.low).toEqual({
|
|
reasoningEffort: "low",
|
|
reasoningSummary: "auto",
|
|
include: ["reasoning.encrypted_content"],
|
|
})
|
|
})
|
|
|
|
test("models after 2025-11-13 include 'none' effort", () => {
|
|
const model = createMockModel({
|
|
id: "gpt-5-nano",
|
|
providerID: "openai",
|
|
api: {
|
|
id: "gpt-5-nano",
|
|
url: "https://api.openai.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
release_date: "2025-11-14",
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
|
|
})
|
|
|
|
test("models after 2025-12-04 include 'xhigh' effort", () => {
|
|
const model = createMockModel({
|
|
id: "openai/gpt-5-chat",
|
|
providerID: "openai",
|
|
api: {
|
|
id: "gpt-5-chat",
|
|
url: "https://api.openai.com",
|
|
npm: "@ai-sdk/openai",
|
|
},
|
|
release_date: "2025-12-05",
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/anthropic", () => {
|
|
test("sonnet 4.6 returns adaptive thinking options", () => {
|
|
const model = createMockModel({
|
|
id: "anthropic/claude-sonnet-4-6",
|
|
providerID: "anthropic",
|
|
api: {
|
|
id: "claude-sonnet-4-6",
|
|
url: "https://api.anthropic.com",
|
|
npm: "@ai-sdk/anthropic",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
|
|
expect(result.high).toEqual({
|
|
thinking: {
|
|
type: "adaptive",
|
|
},
|
|
effort: "high",
|
|
})
|
|
})
|
|
|
|
test("returns high and max with thinking config", () => {
|
|
const model = createMockModel({
|
|
id: "anthropic/claude-4",
|
|
providerID: "anthropic",
|
|
api: {
|
|
id: "claude-4",
|
|
url: "https://api.anthropic.com",
|
|
npm: "@ai-sdk/anthropic",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["high", "max"])
|
|
expect(result.high).toEqual({
|
|
thinking: {
|
|
type: "enabled",
|
|
budgetTokens: 16000,
|
|
},
|
|
})
|
|
expect(result.max).toEqual({
|
|
thinking: {
|
|
type: "enabled",
|
|
budgetTokens: 31999,
|
|
},
|
|
})
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/amazon-bedrock", () => {
|
|
test("anthropic sonnet 4.6 returns adaptive reasoning options", () => {
|
|
const model = createMockModel({
|
|
id: "bedrock/anthropic-claude-sonnet-4-6",
|
|
providerID: "bedrock",
|
|
api: {
|
|
id: "anthropic.claude-sonnet-4-6",
|
|
url: "https://bedrock.amazonaws.com",
|
|
npm: "@ai-sdk/amazon-bedrock",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
|
|
expect(result.max).toEqual({
|
|
reasoningConfig: {
|
|
type: "adaptive",
|
|
maxReasoningEffort: "max",
|
|
},
|
|
})
|
|
})
|
|
|
|
test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
|
|
const model = createMockModel({
|
|
id: "bedrock/llama-4",
|
|
providerID: "bedrock",
|
|
api: {
|
|
id: "llama-4-sc",
|
|
url: "https://bedrock.amazonaws.com",
|
|
npm: "@ai-sdk/amazon-bedrock",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({
|
|
reasoningConfig: {
|
|
type: "enabled",
|
|
maxReasoningEffort: "low",
|
|
},
|
|
})
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/google", () => {
|
|
test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
|
|
const model = createMockModel({
|
|
id: "google/gemini-2.5-pro",
|
|
providerID: "google",
|
|
api: {
|
|
id: "gemini-2.5-pro",
|
|
url: "https://generativelanguage.googleapis.com",
|
|
npm: "@ai-sdk/google",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["high", "max"])
|
|
expect(result.high).toEqual({
|
|
thinkingConfig: {
|
|
includeThoughts: true,
|
|
thinkingBudget: 16000,
|
|
},
|
|
})
|
|
expect(result.max).toEqual({
|
|
thinkingConfig: {
|
|
includeThoughts: true,
|
|
thinkingBudget: 24576,
|
|
},
|
|
})
|
|
})
|
|
|
|
test("other gemini models return low and high with thinkingLevel", () => {
|
|
const model = createMockModel({
|
|
id: "google/gemini-2.0-pro",
|
|
providerID: "google",
|
|
api: {
|
|
id: "gemini-2.0-pro",
|
|
url: "https://generativelanguage.googleapis.com",
|
|
npm: "@ai-sdk/google",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "high"])
|
|
expect(result.low).toEqual({
|
|
thinkingConfig: {
|
|
includeThoughts: true,
|
|
thinkingLevel: "low",
|
|
},
|
|
})
|
|
expect(result.high).toEqual({
|
|
thinkingConfig: {
|
|
includeThoughts: true,
|
|
thinkingLevel: "high",
|
|
},
|
|
})
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/google-vertex", () => {
|
|
test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
|
|
const model = createMockModel({
|
|
id: "google-vertex/gemini-2.5-pro",
|
|
providerID: "google-vertex",
|
|
api: {
|
|
id: "gemini-2.5-pro",
|
|
url: "https://vertexai.googleapis.com",
|
|
npm: "@ai-sdk/google-vertex",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["high", "max"])
|
|
})
|
|
|
|
test("other vertex models return low and high with thinkingLevel", () => {
|
|
const model = createMockModel({
|
|
id: "google-vertex/gemini-2.0-pro",
|
|
providerID: "google-vertex",
|
|
api: {
|
|
id: "gemini-2.0-pro",
|
|
url: "https://vertexai.googleapis.com",
|
|
npm: "@ai-sdk/google-vertex",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "high"])
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/cohere", () => {
|
|
test("returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "cohere/command-r",
|
|
providerID: "cohere",
|
|
api: {
|
|
id: "command-r",
|
|
url: "https://api.cohere.com",
|
|
npm: "@ai-sdk/cohere",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/groq", () => {
|
|
test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
|
|
const model = createMockModel({
|
|
id: "groq/llama-4",
|
|
providerID: "groq",
|
|
api: {
|
|
id: "llama-4-sc",
|
|
url: "https://api.groq.com",
|
|
npm: "@ai-sdk/groq",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
|
|
expect(result.none).toEqual({
|
|
reasoningEffort: "none",
|
|
})
|
|
expect(result.low).toEqual({
|
|
reasoningEffort: "low",
|
|
})
|
|
})
|
|
})
|
|
|
|
describe("@ai-sdk/perplexity", () => {
|
|
test("returns empty object", () => {
|
|
const model = createMockModel({
|
|
id: "perplexity/sonar-plus",
|
|
providerID: "perplexity",
|
|
api: {
|
|
id: "sonar-plus",
|
|
url: "https://api.perplexity.ai",
|
|
npm: "@ai-sdk/perplexity",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
})
|
|
|
|
describe("@jerome-benoit/sap-ai-provider-v2", () => {
|
|
test("anthropic models return thinking variants", () => {
|
|
const model = createMockModel({
|
|
id: "sap-ai-core/anthropic--claude-sonnet-4",
|
|
providerID: "sap-ai-core",
|
|
api: {
|
|
id: "anthropic--claude-sonnet-4",
|
|
url: "https://api.ai.sap",
|
|
npm: "@jerome-benoit/sap-ai-provider-v2",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["high", "max"])
|
|
expect(result.high).toEqual({
|
|
thinking: {
|
|
type: "enabled",
|
|
budgetTokens: 16000,
|
|
},
|
|
})
|
|
expect(result.max).toEqual({
|
|
thinking: {
|
|
type: "enabled",
|
|
budgetTokens: 31999,
|
|
},
|
|
})
|
|
})
|
|
|
|
test("anthropic 4.6 models return adaptive thinking variants", () => {
|
|
const model = createMockModel({
|
|
id: "sap-ai-core/anthropic--claude-sonnet-4-6",
|
|
providerID: "sap-ai-core",
|
|
api: {
|
|
id: "anthropic--claude-sonnet-4-6",
|
|
url: "https://api.ai.sap",
|
|
npm: "@jerome-benoit/sap-ai-provider-v2",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
|
|
expect(result.low).toEqual({
|
|
thinking: {
|
|
type: "adaptive",
|
|
},
|
|
effort: "low",
|
|
})
|
|
expect(result.max).toEqual({
|
|
thinking: {
|
|
type: "adaptive",
|
|
},
|
|
effort: "max",
|
|
})
|
|
})
|
|
|
|
test("gemini 2.5 models return thinkingConfig variants", () => {
|
|
const model = createMockModel({
|
|
id: "sap-ai-core/gcp--gemini-2.5-pro",
|
|
providerID: "sap-ai-core",
|
|
api: {
|
|
id: "gcp--gemini-2.5-pro",
|
|
url: "https://api.ai.sap",
|
|
npm: "@jerome-benoit/sap-ai-provider-v2",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["high", "max"])
|
|
expect(result.high).toEqual({
|
|
thinkingConfig: {
|
|
includeThoughts: true,
|
|
thinkingBudget: 16000,
|
|
},
|
|
})
|
|
expect(result.max).toEqual({
|
|
thinkingConfig: {
|
|
includeThoughts: true,
|
|
thinkingBudget: 24576,
|
|
},
|
|
})
|
|
})
|
|
|
|
test("gpt models return reasoningEffort variants", () => {
|
|
const model = createMockModel({
|
|
id: "sap-ai-core/azure-openai--gpt-4o",
|
|
providerID: "sap-ai-core",
|
|
api: {
|
|
id: "azure-openai--gpt-4o",
|
|
url: "https://api.ai.sap",
|
|
npm: "@jerome-benoit/sap-ai-provider-v2",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
|
|
test("o-series models return reasoningEffort variants", () => {
|
|
const model = createMockModel({
|
|
id: "sap-ai-core/azure-openai--o3-mini",
|
|
providerID: "sap-ai-core",
|
|
api: {
|
|
id: "azure-openai--o3-mini",
|
|
url: "https://api.ai.sap",
|
|
npm: "@jerome-benoit/sap-ai-provider-v2",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(Object.keys(result)).toEqual(["low", "medium", "high"])
|
|
expect(result.low).toEqual({ reasoningEffort: "low" })
|
|
expect(result.high).toEqual({ reasoningEffort: "high" })
|
|
})
|
|
|
|
test("sonar models return empty object", () => {
|
|
const model = createMockModel({
|
|
id: "sap-ai-core/perplexity--sonar-pro",
|
|
providerID: "sap-ai-core",
|
|
api: {
|
|
id: "perplexity--sonar-pro",
|
|
url: "https://api.ai.sap",
|
|
npm: "@jerome-benoit/sap-ai-provider-v2",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
|
|
test("mistral models return empty object", () => {
|
|
const model = createMockModel({
|
|
id: "sap-ai-core/mistral--mistral-large",
|
|
providerID: "sap-ai-core",
|
|
api: {
|
|
id: "mistral--mistral-large",
|
|
url: "https://api.ai.sap",
|
|
npm: "@jerome-benoit/sap-ai-provider-v2",
|
|
},
|
|
})
|
|
const result = ProviderTransform.variants(model)
|
|
expect(result).toEqual({})
|
|
})
|
|
})
|
|
})
|