Files
tf_code/packages/opencode/src/provider/transform.ts
opencode-agent[bot] 9f150b0776 chore: generate
2026-03-03 04:41:52 +00:00

980 lines
32 KiB
TypeScript

import type { ModelMessage } from "ai"
import { mergeDeep, unique } from "remeda"
import type { JSONSchema7 } from "@ai-sdk/provider"
import type { JSONSchema } from "zod/v4/core"
import type { Provider } from "./provider"
import type { ModelsDev } from "./models"
import { iife } from "@/util/iife"
import { Flag } from "@/flag/flag"
type Modality = NonNullable<ModelsDev.Model["modalities"]>["input"][number]

// Bucket a MIME type into the modality key used by model capability flags.
// Returns undefined for MIME types that map to no known modality.
function mimeToModality(mime: string): Modality | undefined {
  if (mime === "application/pdf") return "pdf"
  for (const kind of ["image", "audio", "video"] as const) {
    if (mime.startsWith(`${kind}/`)) return kind
  }
  return undefined
}
export namespace ProviderTransform {
// Default ceiling on generated tokens per request; the experimental flag, when
// set to a truthy value, overrides the 32k default.
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
// Maps npm package to the key the AI SDK expects for providerOptions.
// Unlisted packages yield undefined (caller falls back to the provider ID).
function sdkKey(npm: string): string | undefined {
  const KEY_BY_PACKAGE: Record<string, string> = {
    "@ai-sdk/github-copilot": "copilot",
    "@ai-sdk/openai": "openai",
    "@ai-sdk/azure": "openai",
    "@ai-sdk/amazon-bedrock": "bedrock",
    "@ai-sdk/anthropic": "anthropic",
    "@ai-sdk/google-vertex/anthropic": "anthropic",
    "@ai-sdk/google-vertex": "google",
    "@ai-sdk/google": "google",
    "@ai-sdk/gateway": "gateway",
    "@openrouter/ai-sdk-provider": "openrouter",
  }
  return KEY_BY_PACKAGE[npm]
}
/**
 * Applies provider-specific message fixups before a request is sent:
 * - Anthropic: drop empty messages / empty text+reasoning parts (API rejects them).
 * - Claude (any provider): sanitize tool call IDs to [a-zA-Z0-9_-].
 * - Mistral/Devstral: force 9-char alphanumeric tool call IDs and insert a
 *   filler assistant message between tool and user messages.
 * - Interleaved-reasoning models: hoist reasoning text into the
 *   provider-specific message field named by `capabilities.interleaved.field`.
 *
 * @param msgs model messages to normalize (may be returned mutated or rebuilt)
 * @param model target model, used to pick which fixups apply
 * @param options request options (currently unused; kept for interface stability)
 * @returns the normalized message list
 */
function normalizeMessages(
  msgs: ModelMessage[],
  model: Provider.Model,
  options: Record<string, unknown>,
): ModelMessage[] {
  // Anthropic rejects messages with empty content - filter out empty string messages
  // and remove empty text/reasoning parts from array content
  if (model.api.npm === "@ai-sdk/anthropic") {
    msgs = msgs
      .map((msg) => {
        if (typeof msg.content === "string") {
          if (msg.content === "") return undefined
          return msg
        }
        if (!Array.isArray(msg.content)) return msg
        const filtered = msg.content.filter((part) => {
          if (part.type === "text" || part.type === "reasoning") {
            return part.text !== ""
          }
          return true
        })
        if (filtered.length === 0) return undefined
        return { ...msg, content: filtered }
      })
      .filter((msg): msg is ModelMessage => msg !== undefined && msg.content !== "")
  }
  if (model.api.id.includes("claude")) {
    return msgs.map((msg) => {
      if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
        msg.content = msg.content.map((part) => {
          if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
            return {
              ...part,
              // Claude only accepts [a-zA-Z0-9_-] in tool call IDs.
              toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
            }
          }
          return part
        })
      }
      return msg
    })
  }
  if (
    model.providerID === "mistral" ||
    model.api.id.toLowerCase().includes("mistral") ||
    // FIX: was toLocaleLowerCase(), which is locale-sensitive (e.g. Turkish
    // dotless-I) and inconsistent with the line above.
    model.api.id.toLowerCase().includes("devstral")
  ) {
    const result: ModelMessage[] = []
    for (let i = 0; i < msgs.length; i++) {
      const msg = msgs[i]
      const nextMsg = msgs[i + 1]
      if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
        msg.content = msg.content.map((part) => {
          if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
            // Mistral requires alphanumeric tool call IDs with exactly 9 characters
            const normalizedId = part.toolCallId
              .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
              .substring(0, 9) // Take first 9 characters
              .padEnd(9, "0") // Pad with zeros if less than 9 characters
            return {
              ...part,
              toolCallId: normalizedId,
            }
          }
          return part
        })
      }
      result.push(msg)
      // Fix message sequence: tool messages cannot be followed by user messages
      if (msg.role === "tool" && nextMsg?.role === "user") {
        result.push({
          role: "assistant",
          content: [
            {
              type: "text",
              text: "Done.",
            },
          ],
        })
      }
    }
    return result
  }
  if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
    const field = model.capabilities.interleaved.field
    return msgs.map((msg) => {
      if (msg.role === "assistant" && Array.isArray(msg.content)) {
        const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning")
        const reasoningText = reasoningParts.map((part: any) => part.text).join("")
        // Filter out reasoning parts from content
        const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning")
        // Include reasoning_content | reasoning_details directly on the message for all assistant messages
        if (reasoningText) {
          return {
            ...msg,
            content: filteredContent,
            providerOptions: {
              ...msg.providerOptions,
              openaiCompatible: {
                ...(msg.providerOptions as any)?.openaiCompatible,
                [field]: reasoningText,
              },
            },
          }
        }
        return {
          ...msg,
          content: filteredContent,
        }
      }
      return msg
    })
  }
  return msgs
}
// Marks prompt-cache breakpoints: up to the first two system messages and the
// last two non-system messages get every SDK's cache-control option merged in.
// Anthropic/Bedrock take the options at message level; everyone else on the
// last content part. Mutates the selected messages in place.
function applyCaching(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
  const systemMsgs = msgs.filter((m) => m.role === "system").slice(0, 2)
  const tailMsgs = msgs.filter((m) => m.role !== "system").slice(-2)
  // One entry per SDK namespace; harmless extras are ignored by each SDK.
  const cacheOptions = {
    anthropic: {
      cacheControl: { type: "ephemeral" },
    },
    openrouter: {
      cacheControl: { type: "ephemeral" },
    },
    bedrock: {
      cachePoint: { type: "default" },
    },
    openaiCompatible: {
      cache_control: { type: "ephemeral" },
    },
    copilot: {
      copilot_cache_control: { type: "ephemeral" },
    },
  }
  // Loop-invariant: whether this provider wants message-level cache options.
  const messageLevel = model.providerID === "anthropic" || model.providerID.includes("bedrock")
  for (const msg of unique([...systemMsgs, ...tailMsgs])) {
    if (!messageLevel && Array.isArray(msg.content) && msg.content.length > 0) {
      const lastPart = msg.content[msg.content.length - 1]
      if (lastPart && typeof lastPart === "object") {
        lastPart.providerOptions = mergeDeep(lastPart.providerOptions ?? {}, cacheOptions)
        continue
      }
    }
    msg.providerOptions = mergeDeep(msg.providerOptions ?? {}, cacheOptions)
  }
  return msgs
}
/**
 * Replaces user-message attachments the model cannot ingest with explanatory
 * text parts, so the model can tell the user instead of the request failing.
 * Also detects empty base64 image payloads and replaces them with an error.
 *
 * @param msgs messages to scan (only user messages with array content are touched)
 * @param model target model; `capabilities.input[<modality>]` gates each part
 * @returns messages with unsupported file/image parts swapped for text parts
 */
function unsupportedParts(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
  return msgs.map((msg) => {
    if (msg.role !== "user" || !Array.isArray(msg.content)) return msg
    const filtered = msg.content.map((part) => {
      if (part.type !== "file" && part.type !== "image") return part
      // Check for empty base64 image data
      if (part.type === "image") {
        const imageStr = part.image.toString()
        if (imageStr.startsWith("data:")) {
          const match = imageStr.match(/^data:([^;]+);base64,(.*)$/)
          if (match && (!match[2] || match[2].length === 0)) {
            return {
              type: "text" as const,
              text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
            }
          }
        }
      }
      // Images carry their MIME in the data URL prefix; files carry mediaType.
      const mime = part.type === "image" ? part.image.toString().split(";")[0].replace("data:", "") : part.mediaType
      const filename = part.type === "file" ? part.filename : undefined
      const modality = mimeToModality(mime)
      if (!modality) return part
      if (model.capabilities.input[modality]) return part
      // FIX: interpolate the actual filename; previously this produced the
      // literal text `"$(unknown)"` instead of the file's name.
      const name = filename ? `"${filename}"` : modality
      return {
        type: "text" as const,
        text: `ERROR: Cannot read ${name} (this model does not support ${modality} input). Inform the user.`,
      }
    })
    return { ...msg, content: filtered }
  })
}
/**
 * Full message-transform pipeline for one request: strip unsupported parts,
 * normalize per provider, add cache breakpoints for Claude-family models
 * (except via gateway, which handles caching itself), and remap
 * providerOptions keys from the stored provider ID to the SDK's expected key.
 */
export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, unknown>) {
  msgs = unsupportedParts(msgs, model)
  msgs = normalizeMessages(msgs, model, options)
  const looksLikeClaude =
    model.providerID === "anthropic" ||
    model.api.id.includes("anthropic") ||
    model.api.id.includes("claude") ||
    model.id.includes("anthropic") ||
    model.id.includes("claude") ||
    model.api.npm === "@ai-sdk/anthropic"
  if (looksLikeClaude && model.api.npm !== "@ai-sdk/gateway") {
    msgs = applyCaching(msgs, model)
  }
  // Remap providerOptions keys from stored providerID to expected SDK key
  // (azure keeps its own key despite sharing the openai SDK).
  const key = sdkKey(model.api.npm)
  if (key && key !== model.providerID && model.api.npm !== "@ai-sdk/azure") {
    const remap = (opts: Record<string, any> | undefined) => {
      if (!opts) return opts
      if (!(model.providerID in opts)) return opts
      const { [model.providerID]: moved, ...rest } = opts
      return { ...rest, [key]: moved }
    }
    msgs = msgs.map((msg) => {
      const remapped = { ...msg, providerOptions: remap(msg.providerOptions) }
      if (!Array.isArray(msg.content)) return remapped
      return {
        ...remapped,
        content: msg.content.map((part) => ({ ...part, providerOptions: remap(part.providerOptions) })),
      } as typeof msg
    })
  }
  return msgs
}
// Per-family default sampling temperature; undefined defers to the SDK default.
export function temperature(model: Provider.Model) {
  const id = model.id.toLowerCase()
  if (id.includes("qwen")) return 0.55
  if (id.includes("claude")) return undefined
  for (const family of ["gemini", "glm-4.6", "glm-4.7", "minimax-m2"]) {
    if (id.includes(family)) return 1.0
  }
  if (id.includes("kimi-k2")) {
    // kimi-k2-thinking & kimi-k2.5 && kimi-k2p5 && kimi-k2-5
    const isThinkingVariant = ["thinking", "k2.", "k2p", "k2-5"].some((tag) => id.includes(tag))
    return isThinkingVariant ? 1.0 : 0.6
  }
  return undefined
}
// Per-family default nucleus-sampling value; undefined defers to the SDK.
export function topP(model: Provider.Model) {
  const id = model.id.toLowerCase()
  if (id.includes("qwen")) return 1
  const cappedFamilies = ["minimax-m2", "gemini", "kimi-k2.5", "kimi-k2p5", "kimi-k2-5"]
  return cappedFamilies.some((tag) => id.includes(tag)) ? 0.95 : undefined
}
// Per-family default top-k; undefined defers to the SDK default.
export function topK(model: Provider.Model) {
  const id = model.id.toLowerCase()
  if (id.includes("minimax-m2")) {
    // Newer minimax revisions (m2.x / m25 / m21) tolerate a wider top-k.
    const newerRevision = ["m2.", "m25", "m21"].some((tag) => id.includes(tag))
    return newerRevision ? 40 : 20
  }
  return id.includes("gemini") ? 64 : undefined
}
// Reasoning-effort levels accepted by most providers (see variants() below).
const WIDELY_SUPPORTED_EFFORTS = ["low", "medium", "high"]
// Full OpenAI-style effort ladder, including the newer "none"/"minimal"/"xhigh" extremes.
const OPENAI_EFFORTS = ["none", "minimal", ...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
/**
 * Enumerates the selectable reasoning-effort variants for a model.
 *
 * Returns a map of variant name (e.g. "low", "high", "max") to the
 * providerOptions fragment that activates it, or {} when the model exposes no
 * tunable reasoning. The fragment shape is SDK-specific, hence the large
 * switch on the npm package below.
 */
export function variants(model: Provider.Model): Record<string, Record<string, any>> {
  if (!model.capabilities.reasoning) return {}
  const id = model.id.toLowerCase()
  // Claude Opus/Sonnet 4.6 support "adaptive" thinking with an effort knob.
  const isAnthropicAdaptive = ["opus-4-6", "opus-4.6", "sonnet-4-6", "sonnet-4.6"].some((v) =>
    model.api.id.includes(v),
  )
  const adaptiveEfforts = ["low", "medium", "high", "max"]
  // Families whose reasoning is always-on / not tunable per request.
  if (
    id.includes("deepseek") ||
    id.includes("minimax") ||
    id.includes("glm") ||
    id.includes("mistral") ||
    id.includes("kimi") ||
    // TODO: Remove this after models.dev data is fixed to use "kimi-k2.5" instead of "k2p5"
    id.includes("k2p5")
  )
    return {}
  // see: https://docs.x.ai/docs/guides/reasoning#control-how-hard-the-model-thinks
  if (id.includes("grok") && id.includes("grok-3-mini")) {
    if (model.api.npm === "@openrouter/ai-sdk-provider") {
      return {
        low: { reasoning: { effort: "low" } },
        high: { reasoning: { effort: "high" } },
      }
    }
    return {
      low: { reasoningEffort: "low" },
      high: { reasoningEffort: "high" },
    }
  }
  // Other grok models expose no effort control.
  if (id.includes("grok")) return {}
  switch (model.api.npm) {
    case "@openrouter/ai-sdk-provider":
      // Only gpt/gemini-3/claude models honor effort through openrouter.
      if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("claude")) return {}
      return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))
    case "@ai-sdk/gateway":
      // Gateway routes to the upstream provider, so mirror that provider's shape.
      if (model.id.includes("anthropic")) {
        if (isAnthropicAdaptive) {
          return Object.fromEntries(
            adaptiveEfforts.map((effort) => [
              effort,
              {
                thinking: {
                  type: "adaptive",
                },
                effort,
              },
            ]),
          )
        }
        return {
          high: {
            thinking: {
              type: "enabled",
              budgetTokens: 16000,
            },
          },
          max: {
            thinking: {
              type: "enabled",
              budgetTokens: 31999,
            },
          },
        }
      }
      if (model.id.includes("google")) {
        // gemini-2.5 takes a token budget, gemini-3 takes a thinking level.
        if (id.includes("2.5")) {
          return {
            high: {
              thinkingConfig: {
                includeThoughts: true,
                thinkingBudget: 16000,
              },
            },
            max: {
              thinkingConfig: {
                includeThoughts: true,
                thinkingBudget: 24576,
              },
            },
          }
        }
        return Object.fromEntries(
          ["low", "high"].map((effort) => [
            effort,
            {
              includeThoughts: true,
              thinkingLevel: effort,
            },
          ]),
        )
      }
      // Everything else via gateway behaves OpenAI-style.
      return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
    case "@ai-sdk/github-copilot":
      if (model.id.includes("gemini")) {
        // currently github copilot only returns thinking
        return {}
      }
      if (model.id.includes("claude")) {
        return {
          thinking: { thinking_budget: 4000 },
        }
      }
      const copilotEfforts = iife(() => {
        // Only the newest GPT revisions accept "xhigh".
        if (id.includes("5.1-codex-max") || id.includes("5.2") || id.includes("5.3"))
          return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
        return WIDELY_SUPPORTED_EFFORTS
      })
      return Object.fromEntries(
        copilotEfforts.map((effort) => [
          effort,
          {
            reasoningEffort: effort,
            reasoningSummary: "auto",
            include: ["reasoning.encrypted_content"],
          },
        ]),
      )
    case "@ai-sdk/cerebras":
    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cerebras
    case "@ai-sdk/togetherai":
    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/togetherai
    case "@ai-sdk/xai":
    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/xai
    case "@ai-sdk/deepinfra":
    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/deepinfra
    case "venice-ai-sdk-provider":
    // https://docs.venice.ai/overview/guides/reasoning-models#reasoning-effort
    case "@ai-sdk/openai-compatible":
      return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
    case "@ai-sdk/azure":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/azure
      if (id === "o1-mini") return {}
      const azureEfforts = ["low", "medium", "high"]
      if (id.includes("gpt-5-") || id === "gpt-5") {
        azureEfforts.unshift("minimal")
      }
      return Object.fromEntries(
        azureEfforts.map((effort) => [
          effort,
          {
            reasoningEffort: effort,
            reasoningSummary: "auto",
            include: ["reasoning.encrypted_content"],
          },
        ]),
      )
    case "@ai-sdk/openai":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/openai
      if (id === "gpt-5-pro") return {}
      const openaiEfforts = iife(() => {
        if (id.includes("codex")) {
          if (id.includes("5.2") || id.includes("5.3")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
          return WIDELY_SUPPORTED_EFFORTS
        }
        const arr = [...WIDELY_SUPPORTED_EFFORTS]
        if (id.includes("gpt-5-") || id === "gpt-5") {
          arr.unshift("minimal")
        }
        // Models released after these dates gained the "none"/"xhigh" extremes.
        if (model.release_date >= "2025-11-13") {
          arr.unshift("none")
        }
        if (model.release_date >= "2025-12-04") {
          arr.push("xhigh")
        }
        return arr
      })
      return Object.fromEntries(
        openaiEfforts.map((effort) => [
          effort,
          {
            reasoningEffort: effort,
            reasoningSummary: "auto",
            include: ["reasoning.encrypted_content"],
          },
        ]),
      )
    case "@ai-sdk/anthropic":
    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/anthropic
    case "@ai-sdk/google-vertex/anthropic":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex#anthropic-provider
      if (isAnthropicAdaptive) {
        return Object.fromEntries(
          adaptiveEfforts.map((effort) => [
            effort,
            {
              thinking: {
                type: "adaptive",
              },
              effort,
            },
          ]),
        )
      }
      // Budgets are clamped to the model's output limit to stay valid.
      return {
        high: {
          thinking: {
            type: "enabled",
            budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
          },
        },
        max: {
          thinking: {
            type: "enabled",
            budgetTokens: Math.min(31_999, model.limit.output - 1),
          },
        },
      }
    case "@ai-sdk/amazon-bedrock":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock
      if (isAnthropicAdaptive) {
        return Object.fromEntries(
          adaptiveEfforts.map((effort) => [
            effort,
            {
              reasoningConfig: {
                type: "adaptive",
                maxReasoningEffort: effort,
              },
            },
          ]),
        )
      }
      // For Anthropic models on Bedrock, use reasoningConfig with budgetTokens
      if (model.api.id.includes("anthropic")) {
        return {
          high: {
            reasoningConfig: {
              type: "enabled",
              budgetTokens: 16000,
            },
          },
          max: {
            reasoningConfig: {
              type: "enabled",
              budgetTokens: 31999,
            },
          },
        }
      }
      // For Amazon Nova models, use reasoningConfig with maxReasoningEffort
      return Object.fromEntries(
        WIDELY_SUPPORTED_EFFORTS.map((effort) => [
          effort,
          {
            reasoningConfig: {
              type: "enabled",
              maxReasoningEffort: effort,
            },
          },
        ]),
      )
    case "@ai-sdk/google-vertex":
    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex
    case "@ai-sdk/google":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai
      // gemini-2.5 takes a token budget; newer models take a thinking level.
      if (id.includes("2.5")) {
        return {
          high: {
            thinkingConfig: {
              includeThoughts: true,
              thinkingBudget: 16000,
            },
          },
          max: {
            thinkingConfig: {
              includeThoughts: true,
              thinkingBudget: 24576,
            },
          },
        }
      }
      let levels = ["low", "high"]
      if (id.includes("3.1")) {
        levels = ["low", "medium", "high"]
      }
      return Object.fromEntries(
        levels.map((effort) => [
          effort,
          {
            thinkingConfig: {
              includeThoughts: true,
              thinkingLevel: effort,
            },
          },
        ]),
      )
    case "@ai-sdk/mistral":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/mistral
      return {}
    case "@ai-sdk/cohere":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cohere
      return {}
    case "@ai-sdk/groq":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/groq
      const groqEffort = ["none", ...WIDELY_SUPPORTED_EFFORTS]
      return Object.fromEntries(
        groqEffort.map((effort) => [
          effort,
          {
            reasoningEffort: effort,
          },
        ]),
      )
    case "@ai-sdk/perplexity":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/perplexity
      return {}
    case "@mymediset/sap-ai-provider":
    case "@jerome-benoit/sap-ai-provider-v2":
      // SAP AI hosts both Anthropic-shaped and OpenAI-shaped models.
      if (model.api.id.includes("anthropic")) {
        return {
          high: {
            thinking: {
              type: "enabled",
              budgetTokens: 16000,
            },
          },
          max: {
            thinking: {
              type: "enabled",
              budgetTokens: 31999,
            },
          },
        }
      }
      return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
  }
  return {}
}
/**
 * Builds the default providerOptions payload for one request.
 *
 * NOTE: the guards below are order-sensitive — several write overlapping keys
 * (e.g. "thinking", "promptCacheKey"), and a later matching guard deliberately
 * overrides an earlier one. Append new rules at the end unless an override is
 * intended.
 *
 * @param input.model target model
 * @param input.sessionID used as the prompt-cache key where supported
 * @param input.providerOptions provider config (only `setCacheKey` is read here)
 */
export function options(input: {
  model: Provider.Model
  sessionID: string
  providerOptions?: Record<string, any>
}): Record<string, any> {
  const result: Record<string, any> = {}
  // openai and providers using openai package should set store to false by default.
  if (
    input.model.providerID === "openai" ||
    input.model.api.npm === "@ai-sdk/openai" ||
    input.model.api.npm === "@ai-sdk/github-copilot"
  ) {
    result["store"] = false
  }
  if (input.model.api.npm === "@openrouter/ai-sdk-provider") {
    // Ask openrouter to report token usage in the response.
    result["usage"] = {
      include: true,
    }
    if (input.model.api.id.includes("gemini-3")) {
      result["reasoning"] = { effort: "high" }
    }
  }
  if (
    input.model.providerID === "baseten" ||
    (input.model.providerID === "opencode" && ["kimi-k2-thinking", "glm-4.6"].includes(input.model.api.id))
  ) {
    result["chat_template_args"] = { enable_thinking: true }
  }
  if (["zai", "zhipuai"].includes(input.model.providerID) && input.model.api.npm === "@ai-sdk/openai-compatible") {
    result["thinking"] = {
      type: "enabled",
      clear_thinking: false,
    }
  }
  if (input.model.providerID === "openai" || input.providerOptions?.setCacheKey) {
    result["promptCacheKey"] = input.sessionID
  }
  if (input.model.api.npm === "@ai-sdk/google" || input.model.api.npm === "@ai-sdk/google-vertex") {
    result["thinkingConfig"] = {
      includeThoughts: true,
    }
    if (input.model.api.id.includes("gemini-3")) {
      result["thinkingConfig"]["thinkingLevel"] = "high"
    }
  }
  // Enable thinking by default for kimi-k2.5/k2p5 models using anthropic SDK
  const modelId = input.model.api.id.toLowerCase()
  if (
    (input.model.api.npm === "@ai-sdk/anthropic" || input.model.api.npm === "@ai-sdk/google-vertex/anthropic") &&
    (modelId.includes("k2p5") || modelId.includes("kimi-k2.5") || modelId.includes("kimi-k2p5"))
  ) {
    // Budget clamped to half the output limit so the request stays valid.
    result["thinking"] = {
      type: "enabled",
      budgetTokens: Math.min(16_000, Math.floor(input.model.limit.output / 2 - 1)),
    }
  }
  // Enable thinking for reasoning models on alibaba-cn (DashScope).
  // DashScope's OpenAI-compatible API requires `enable_thinking: true` in the request body
  // to return reasoning_content. Without it, models like kimi-k2.5, qwen-plus, qwen3, qwq,
  // deepseek-r1, etc. never output thinking/reasoning tokens.
  // Note: kimi-k2-thinking is excluded as it returns reasoning_content by default.
  if (
    input.model.providerID === "alibaba-cn" &&
    input.model.capabilities.reasoning &&
    input.model.api.npm === "@ai-sdk/openai-compatible" &&
    !modelId.includes("kimi-k2-thinking")
  ) {
    result["enable_thinking"] = true
  }
  if (input.model.api.id.includes("gpt-5") && !input.model.api.id.includes("gpt-5-chat")) {
    if (!input.model.api.id.includes("gpt-5-pro")) {
      result["reasoningEffort"] = "medium"
      result["reasoningSummary"] = "auto"
    }
    // Only set textVerbosity for non-chat gpt-5.x models
    // Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
    if (
      input.model.api.id.includes("gpt-5.") &&
      !input.model.api.id.includes("codex") &&
      !input.model.api.id.includes("-chat") &&
      input.model.providerID !== "azure"
    ) {
      result["textVerbosity"] = "low"
    }
    if (input.model.providerID.startsWith("opencode")) {
      result["promptCacheKey"] = input.sessionID
      result["include"] = ["reasoning.encrypted_content"]
      result["reasoningSummary"] = "auto"
    }
  }
  if (input.model.providerID === "venice") {
    result["promptCacheKey"] = input.sessionID
  }
  if (input.model.providerID === "openrouter") {
    // openrouter uses the snake_case field name.
    result["prompt_cache_key"] = input.sessionID
  }
  if (input.model.api.npm === "@ai-sdk/gateway") {
    result["gateway"] = {
      caching: "auto",
    }
  }
  return result
}
// Options for the lightweight "small model" role: skip storage and keep
// reasoning at the cheapest level each provider supports.
export function smallOptions(model: Provider.Model) {
  const usesOpenAIStack =
    model.providerID === "openai" ||
    model.api.npm === "@ai-sdk/openai" ||
    model.api.npm === "@ai-sdk/github-copilot"
  if (usesOpenAIStack) {
    if (!model.api.id.includes("gpt-5")) return { store: false }
    // gpt-5.x no longer accepts "minimal"; older gpt-5 does.
    const effort = model.api.id.includes("5.") ? "low" : "minimal"
    return { store: false, reasoningEffort: effort }
  }
  switch (model.providerID) {
    case "google":
      // gemini-3 uses thinkingLevel, gemini-2.5 uses thinkingBudget
      if (model.api.id.includes("gemini-3")) {
        return { thinkingConfig: { thinkingLevel: "minimal" } }
      }
      return { thinkingConfig: { thinkingBudget: 0 } }
    case "openrouter":
      if (model.api.id.includes("google")) {
        return { reasoning: { enabled: false } }
      }
      return { reasoningEffort: "minimal" }
    case "venice":
      return { veniceParameters: { disableThinking: true } }
    default:
      return {}
  }
}
// Maps model ID prefix to provider slug used in providerOptions.
// Example: "amazon/nova-2-lite" → "bedrock"
// Prefixes absent from this map are used verbatim as the slug.
const SLUG_OVERRIDES: Record<string, string> = {
  amazon: "bedrock",
}
/**
 * Nests a flat options object under the providerOptions key the SDK expects.
 *
 * For the gateway SDK, options are split across two namespaces: `gateway`
 * (gateway-native routing/caching controls) stays as-is, and every other
 * top-level option is routed under the upstream provider slug derived from the
 * model id prefix (e.g. "anthropic/claude-..." → "anthropic", with
 * SLUG_OVERRIDES applied). When no slug can be derived, the leftover options
 * are merged into the `gateway` namespace instead.
 */
export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
  if (model.api.npm !== "@ai-sdk/gateway") {
    const key = sdkKey(model.api.npm) ?? model.providerID
    return { [key]: options }
  }
  const slashAt = model.api.id.indexOf("/")
  const prefix = slashAt > 0 ? model.api.id.slice(0, slashAt) : undefined
  const slug = prefix ? (SLUG_OVERRIDES[prefix] ?? prefix) : undefined
  const { gateway, ...rest } = options
  const result: Record<string, any> = {}
  if (gateway !== undefined) result.gateway = gateway
  if (Object.keys(rest).length > 0) {
    if (slug) {
      // Route model-specific options under the provider slug
      result[slug] = rest
    } else if (gateway && typeof gateway === "object" && !Array.isArray(gateway)) {
      result.gateway = { ...gateway, ...rest }
    } else {
      result.gateway = rest
    }
  }
  return result
}
// Caps a request at OUTPUT_TOKEN_MAX; a zero/NaN model limit falls back to the
// cap itself (the `||` guard).
export function maxOutputTokens(model: Provider.Model): number {
  const capped = Math.min(model.limit.output, OUTPUT_TOKEN_MAX)
  return capped || OUTPUT_TOKEN_MAX
}
/**
 * Adjusts a tool/output JSON schema for provider quirks. Only Google/Gemini
 * models need fixups today:
 * - enum values are stringified (and numeric enum schemas retyped to string),
 * - `required` entries without a matching property are dropped,
 * - bare `array` schemas get a default string `items`,
 * - `properties`/`required` are stripped from non-object types.
 * Other models get the schema back untouched.
 */
export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema | JSONSchema7): JSONSchema7 {
  // Convert integer enums to string enums for Google/Gemini
  if (model.providerID === "google" || model.api.id.includes("gemini")) {
    const isPlainObject = (node: unknown): node is Record<string, any> =>
      typeof node === "object" && node !== null && !Array.isArray(node)
    const hasCombiner = (node: unknown) =>
      isPlainObject(node) && (Array.isArray(node.anyOf) || Array.isArray(node.oneOf) || Array.isArray(node.allOf))
    // True when the node carries any schema-defining keyword of its own.
    const hasSchemaIntent = (node: unknown) => {
      if (!isPlainObject(node)) return false
      if (hasCombiner(node)) return true
      return [
        "type",
        "properties",
        "items",
        "prefixItems",
        "enum",
        "const",
        "$ref",
        "additionalProperties",
        "patternProperties",
        "required",
        "not",
        "if",
        "then",
        "else",
      ].some((key) => key in node)
    }
    const sanitizeGemini = (obj: any): any => {
      if (obj === null || typeof obj !== "object") {
        return obj
      }
      if (Array.isArray(obj)) {
        return obj.map(sanitizeGemini)
      }
      const result: any = {}
      for (const [key, value] of Object.entries(obj)) {
        if (key === "enum" && Array.isArray(value)) {
          // Convert all enum values to strings
          result[key] = value.map((v) => String(v))
        } else if (typeof value === "object" && value !== null) {
          result[key] = sanitizeGemini(value)
        } else {
          result[key] = value
        }
      }
      // If we stringified an enum, retype numeric schemas to string.
      // FIX: done after the key loop so it applies regardless of whether
      // "type" precedes or follows "enum" in the object's key order;
      // previously {enum: [...], type: "integer"} kept the integer type.
      if (Array.isArray(result.enum) && (result.type === "integer" || result.type === "number")) {
        result.type = "string"
      }
      // Filter required array to only include fields that exist in properties
      if (result.type === "object" && result.properties && Array.isArray(result.required)) {
        result.required = result.required.filter((field: any) => field in result.properties)
      }
      if (result.type === "array" && !hasCombiner(result)) {
        if (result.items == null) {
          result.items = {}
        }
        // Ensure items has a type only when it's still schema-empty.
        if (isPlainObject(result.items) && !hasSchemaIntent(result.items)) {
          result.items.type = "string"
        }
      }
      // Remove properties/required from non-object types (Gemini rejects these)
      if (result.type && result.type !== "object" && !hasCombiner(result)) {
        delete result.properties
        delete result.required
      }
      return result
    }
    schema = sanitizeGemini(schema)
  }
  return schema as JSONSchema7
}
}