refactor: apply minimal tfcode branding

- Rename packages/opencode → packages/tfcode (directory only)
- Rename bin/opencode → bin/tfcode (CLI binary)
- Rename .opencode → .tfcode (config directory)
- Update package.json name and bin field
- Update config directory path references (.tfcode)
- Keep internal code references as 'opencode' for easy upstream sync
- Keep @opencode-ai/* workspace package names

This minimal branding approach allows clean merges from upstream
opencode repository while providing tfcode branding for users.
This commit is contained in:
Gab
2026-03-24 13:19:59 +11:00
parent 8bcbd40e9b
commit a8b73fd754
608 changed files with 26 additions and 32 deletions

View File

@@ -0,0 +1,251 @@
import type { AuthOuathResult, Hooks } from "@opencode-ai/plugin"
import { NamedError } from "@opencode-ai/util/error"
import { Auth } from "@/auth"
import { InstanceState } from "@/effect/instance-state"
import { makeRunPromise } from "@/effect/run-service"
import { Plugin } from "../plugin"
import { ProviderID } from "./schema"
import { Array as Arr, Effect, Layer, Record, Result, ServiceMap } from "effect"
import z from "zod"
export namespace ProviderAuth {
  /**
   * Serializable description of one authentication method a provider
   * plugin exposes: an OAuth flow or an API-key flow, optionally preceded
   * by interactive prompts collected from the user.
   */
  export const Method = z
    .object({
      type: z.union([z.literal("oauth"), z.literal("api")]),
      label: z.string(),
      prompts: z
        .array(
          z.union([
            // Free-text prompt.
            z.object({
              type: z.literal("text"),
              key: z.string(),
              message: z.string(),
              placeholder: z.string().optional(),
              // Conditionally show this prompt based on a previous answer.
              when: z
                .object({
                  key: z.string(),
                  op: z.union([z.literal("eq"), z.literal("neq")]),
                  value: z.string(),
                })
                .optional(),
            }),
            // Single-choice select prompt.
            z.object({
              type: z.literal("select"),
              key: z.string(),
              message: z.string(),
              options: z.array(
                z.object({
                  label: z.string(),
                  value: z.string(),
                  hint: z.string().optional(),
                }),
              ),
              // Conditionally show this prompt based on a previous answer.
              when: z
                .object({
                  key: z.string(),
                  op: z.union([z.literal("eq"), z.literal("neq")]),
                  value: z.string(),
                })
                .optional(),
            }),
          ]),
        )
        .optional(),
    })
    .meta({
      ref: "ProviderAuthMethod",
    })
  export type Method = z.infer<typeof Method>
  /**
   * Result of starting an OAuth flow: the URL to visit, whether the flow
   * completes automatically ("auto") or requires pasting a code ("code"),
   * and user-facing instructions.
   */
  export const Authorization = z
    .object({
      url: z.string(),
      method: z.union([z.literal("auto"), z.literal("code")]),
      instructions: z.string(),
    })
    .meta({
      ref: "ProviderAuthAuthorization",
    })
  export type Authorization = z.infer<typeof Authorization>
  // Failed because callback() found no pending OAuth flow for the provider.
  export const OauthMissing = NamedError.create("ProviderAuthOauthMissing", z.object({ providerID: ProviderID.zod }))
  // Failed because a "code"-style OAuth flow was completed without a code.
  export const OauthCodeMissing = NamedError.create(
    "ProviderAuthOauthCodeMissing",
    z.object({ providerID: ProviderID.zod }),
  )
  // Failed because the plugin's OAuth callback did not report success.
  export const OauthCallbackFailed = NamedError.create("ProviderAuthOauthCallbackFailed", z.object({}))
  // Failed because a prompt's validate() rejected a user-supplied value.
  export const ValidationFailed = NamedError.create(
    "ProviderAuthValidationFailed",
    z.object({
      field: z.string(),
      message: z.string(),
    }),
  )
  /** Union of everything authorize()/callback() can fail with. */
  export type Error =
    | Auth.AuthError
    | InstanceType<typeof OauthMissing>
    | InstanceType<typeof OauthCodeMissing>
    | InstanceType<typeof OauthCallbackFailed>
    | InstanceType<typeof ValidationFailed>
  // Auth hook contributed by a plugin (see the Hooks type of @opencode-ai/plugin).
  type Hook = NonNullable<Hooks["auth"]>
  /** Service surface: list auth methods, start an OAuth flow, complete it. */
  export interface Interface {
    readonly methods: () => Effect.Effect<Record<ProviderID, Method[]>>
    readonly authorize: (input: {
      providerID: ProviderID
      method: number
      inputs?: Record<string, string>
    }) => Effect.Effect<Authorization | undefined, Error>
    readonly callback: (input: { providerID: ProviderID; method: number; code?: string }) => Effect.Effect<void, Error>
  }
  // Per-instance state: plugin auth hooks keyed by provider, plus OAuth
  // flows that were started via authorize() but not yet finished via callback().
  interface State {
    hooks: Record<ProviderID, Hook>
    pending: Map<ProviderID, AuthOuathResult>
  }
  export class Service extends ServiceMap.Service<Service, Interface>()("@opencode/ProviderAuth") {}
  export const layer = Layer.effect(
    Service,
    Effect.gen(function* () {
      const auth = yield* Auth.Service
      // Lazily collect the auth hooks from all loaded plugins.
      const state = yield* InstanceState.make<State>(
        Effect.fn("ProviderAuth.state")(() =>
          Effect.promise(async () => {
            const plugins = await Plugin.list()
            return {
              hooks: Record.fromEntries(
                Arr.filterMap(plugins, (x) =>
                  x.auth?.provider !== undefined
                    ? Result.succeed([ProviderID.make(x.auth.provider), x.auth] as const)
                    : Result.failVoid,
                ),
              ),
              pending: new Map<ProviderID, AuthOuathResult>(),
            }
          }),
        ),
      )
      // Project each provider's hook methods into the serializable Method
      // shape (drops non-serializable internals such as validate functions).
      const methods = Effect.fn("ProviderAuth.methods")(function* () {
        const hooks = (yield* InstanceState.get(state)).hooks
        return Record.map(hooks, (item) =>
          item.methods.map(
            (method): Method => ({
              type: method.type,
              label: method.label,
              prompts: method.prompts?.map((prompt) => {
                if (prompt.type === "select") {
                  return {
                    type: "select" as const,
                    key: prompt.key,
                    message: prompt.message,
                    options: prompt.options,
                    when: prompt.when,
                  }
                }
                return {
                  type: "text" as const,
                  key: prompt.key,
                  message: prompt.message,
                  placeholder: prompt.placeholder,
                  when: prompt.when,
                }
              }),
            }),
          ),
        )
      })
      // Validate any prompt inputs, then start the plugin's OAuth flow and
      // stash the result so callback() can finish it. Returns undefined for
      // non-OAuth (api-key) methods.
      const authorize = Effect.fn("ProviderAuth.authorize")(function* (input: {
        providerID: ProviderID
        method: number
        inputs?: Record<string, string>
      }) {
        const { hooks, pending } = yield* InstanceState.get(state)
        // NOTE(review): assumes providerID/method index an existing hook; an
        // unknown provider would throw here rather than fail typed — confirm
        // callers only pass values obtained from methods().
        const method = hooks[input.providerID].methods[input.method]
        if (method.type !== "oauth") return
        if (method.prompts && input.inputs) {
          for (const prompt of method.prompts) {
            if (prompt.type === "text" && prompt.validate && input.inputs[prompt.key] !== undefined) {
              const error = prompt.validate(input.inputs[prompt.key])
              if (error) return yield* Effect.fail(new ValidationFailed({ field: prompt.key, message: error }))
            }
          }
        }
        const result = yield* Effect.promise(() => method.authorize(input.inputs))
        pending.set(input.providerID, result)
        return {
          url: result.url,
          method: result.method,
          instructions: result.instructions,
        }
      })
      // Complete a pending OAuth flow and persist whatever credentials it
      // yields (API key or OAuth token set) through the Auth service.
      const callback = Effect.fn("ProviderAuth.callback")(function* (input: {
        providerID: ProviderID
        method: number
        code?: string
      }) {
        const pending = (yield* InstanceState.get(state)).pending
        const match = pending.get(input.providerID)
        if (!match) return yield* Effect.fail(new OauthMissing({ providerID: input.providerID }))
        if (match.method === "code" && !input.code) {
          return yield* Effect.fail(new OauthCodeMissing({ providerID: input.providerID }))
        }
        const result = yield* Effect.promise(() =>
          match.method === "code" ? match.callback(input.code!) : match.callback(),
        )
        if (!result || result.type !== "success") return yield* Effect.fail(new OauthCallbackFailed({}))
        // Success payload carries either an API key or an OAuth token set.
        if ("key" in result) {
          yield* auth.set(input.providerID, {
            type: "api",
            key: result.key,
          })
        }
        if ("refresh" in result) {
          yield* auth.set(input.providerID, {
            type: "oauth",
            access: result.access,
            refresh: result.refresh,
            expires: result.expires,
            ...(result.accountId ? { accountId: result.accountId } : {}),
          })
        }
      })
      return Service.of({ methods, authorize, callback })
    }),
  )
  export const defaultLayer = layer.pipe(Layer.provide(Auth.layer))
  const runPromise = makeRunPromise(Service, defaultLayer)
  // Promise-based convenience wrappers over the Effect service.
  export async function methods() {
    return runPromise((svc) => svc.methods())
  }
  export async function authorize(input: {
    providerID: ProviderID
    method: number
    inputs?: Record<string, string>
  }): Promise<Authorization | undefined> {
    return runPromise((svc) => svc.authorize(input))
  }
  export async function callback(input: { providerID: ProviderID; method: number; code?: string }) {
    return runPromise((svc) => svc.callback(input))
  }
}

View File

@@ -0,0 +1,194 @@
import { APICallError } from "ai"
import { STATUS_CODES } from "http"
import { iife } from "@/util/iife"
import type { ProviderID } from "./schema"
export namespace ProviderError {
  // Adapted from overflow detection patterns in:
  // https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/utils/overflow.ts
  //
  // Each regex matches the context-overflow error text of the provider(s)
  // annotated on its line.
  const OVERFLOW_PATTERNS = [
    /prompt is too long/i, // Anthropic
    /input is too long for requested model/i, // Amazon Bedrock
    /exceeds the context window/i, // OpenAI (Completions + Responses API message text)
    /input token count.*exceeds the maximum/i, // Google (Gemini)
    /maximum prompt length is \d+/i, // xAI (Grok)
    /reduce the length of the messages/i, // Groq
    /maximum context length is \d+ tokens/i, // OpenRouter, DeepSeek, vLLM
    /exceeds the limit of \d+/i, // GitHub Copilot
    /exceeds the available context size/i, // llama.cpp server
    /greater than the context length/i, // LM Studio
    /context window exceeds limit/i, // MiniMax
    /exceeded model token limit/i, // Kimi For Coding, Moonshot
    /context[_ ]length[_ ]exceeded/i, // Generic fallback
    /request entity too large/i, // HTTP 413
    /context length is only \d+ tokens/i, // vLLM
    /input length.*exceeds.*context length/i, // vLLM
  ]
  // OpenAI-family retry policy: additionally treat 404 as retryable because
  // openai sometimes returns 404 for models that are actually available.
  function isOpenAiErrorRetryable(e: APICallError) {
    const status = e.statusCode
    if (!status) return e.isRetryable
    return status === 404 || e.isRetryable
  }
  // Providers not reliably handled in this function:
  // - z.ai: can accept overflow silently (needs token-count/context-window checks)
  function isOverflow(message: string) {
    if (OVERFLOW_PATTERNS.some((p) => p.test(message))) return true
    // Providers/status patterns handled outside of regex list:
    // - Cerebras: often returns "400 (no body)" / "413 (no body)"
    // - Mistral: often returns "400 (no body)" / "413 (no body)"
    return /^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message)
  }
  /**
   * Build a human-readable message for an APICallError, preferring the most
   * specific information available (error body > status text > fallback).
   */
  function message(providerID: ProviderID, e: APICallError) {
    return rawMessage(providerID, e).trim()
  }
  // Untrimmed body of message(); split out so each branch can simply return.
  function rawMessage(providerID: ProviderID, e: APICallError) {
    const msg = e.message
    if (msg === "") {
      // No message at all: fall back to the response body, then the HTTP
      // status text, then a generic placeholder.
      if (e.responseBody) return e.responseBody
      if (e.statusCode) {
        const err = STATUS_CODES[e.statusCode]
        if (err) return err
      }
      return "Unknown error"
    }
    // If the message is already more specific than the bare status text
    // (or there is no body to mine), use it as-is.
    if (!e.responseBody || (e.statusCode && msg !== STATUS_CODES[e.statusCode])) {
      return msg
    }
    try {
      const body = JSON.parse(e.responseBody)
      // try to extract common error message fields; check the nested
      // `error.message` BEFORE `error` itself — `error` is frequently an
      // object wrapper, and checking it first would shadow its own message
      // (the object fails the string check and the detail is lost).
      const errMsg = body.message || body.error?.message || body.error
      if (errMsg && typeof errMsg === "string") {
        return `${msg}: ${errMsg}`
      }
    } catch {}
    // If responseBody is HTML (e.g. from a gateway or proxy error page),
    // provide a human-readable message instead of dumping raw markup
    if (/^\s*<!doctype|^\s*<html/i.test(e.responseBody)) {
      if (e.statusCode === 401) {
        return "Unauthorized: request was blocked by a gateway or proxy. Your authentication token may be missing or expired — try running `opencode auth login <your provider URL>` to re-authenticate."
      }
      if (e.statusCode === 403) {
        return "Forbidden: request was blocked by a gateway or proxy. You may not have permission to access this resource — check your account and provider settings."
      }
      return msg
    }
    return `${msg}: ${e.responseBody}`
  }
  // Best-effort coercion of `input` into a plain object: strings are
  // JSON-parsed; non-object results and parse failures yield undefined.
  function json(input: unknown) {
    if (typeof input === "string") {
      try {
        const result = JSON.parse(input)
        if (result && typeof result === "object") return result
        return undefined
      } catch {
        return undefined
      }
    }
    if (typeof input === "object" && input !== null) {
      return input
    }
    return undefined
  }
  /** Classified error extracted from a streamed error event. */
  export type ParsedStreamError =
    | {
        type: "context_overflow"
        message: string
        responseBody: string
      }
    | {
        type: "api_error"
        message: string
        isRetryable: false
        responseBody: string
      }
  /**
   * Classify a raw stream error payload (string or object) by its
   * OpenAI-style `error.code`. Returns undefined for unrecognized input.
   */
  export function parseStreamError(input: unknown): ParsedStreamError | undefined {
    const body = json(input)
    if (!body) return
    if (body.type !== "error") return
    const responseBody = JSON.stringify(body)
    switch (body?.error?.code) {
      case "context_length_exceeded":
        return {
          type: "context_overflow",
          message: "Input exceeds context window of this model",
          responseBody,
        }
      case "insufficient_quota":
        return {
          type: "api_error",
          message: "Quota exceeded. Check your plan and billing details.",
          isRetryable: false,
          responseBody,
        }
      case "usage_not_included":
        return {
          type: "api_error",
          message: "To use Codex with your ChatGPT plan, upgrade to Plus: https://chatgpt.com/explore/plus.",
          isRetryable: false,
          responseBody,
        }
      case "invalid_prompt":
        return {
          type: "api_error",
          message: typeof body?.error?.message === "string" ? body?.error?.message : "Invalid prompt.",
          isRetryable: false,
          responseBody,
        }
    }
  }
  /** Classified APICallError: context overflow vs. general API failure. */
  export type ParsedAPICallError =
    | {
        type: "context_overflow"
        message: string
        responseBody?: string
      }
    | {
        type: "api_error"
        message: string
        statusCode?: number
        isRetryable: boolean
        responseHeaders?: Record<string, string>
        responseBody?: string
        metadata?: Record<string, string>
      }
  /**
   * Classify an AI SDK APICallError. Overflow is detected from the message
   * patterns, HTTP 413, or an explicit `context_length_exceeded` code;
   * everything else becomes an api_error with a provider-aware retry flag.
   */
  export function parseAPICallError(input: { providerID: ProviderID; error: APICallError }): ParsedAPICallError {
    const m = message(input.providerID, input.error)
    const body = json(input.error.responseBody)
    if (isOverflow(m) || input.error.statusCode === 413 || body?.error?.code === "context_length_exceeded") {
      return {
        type: "context_overflow",
        message: m,
        responseBody: input.error.responseBody,
      }
    }
    const metadata = input.error.url ? { url: input.error.url } : undefined
    return {
      type: "api_error",
      message: m,
      statusCode: input.error.statusCode,
      isRetryable: input.providerID.startsWith("openai")
        ? isOpenAiErrorRetryable(input.error)
        : input.error.isRetryable,
      responseHeaders: input.error.responseHeaders,
      responseBody: input.error.responseBody,
      metadata,
    }
  }
}

View File

@@ -0,0 +1,132 @@
import { Global } from "../global"
import { Log } from "../util/log"
import path from "path"
import z from "zod"
import { Installation } from "../installation"
import { Flag } from "../flag/flag"
import { lazy } from "@/util/lazy"
import { Filesystem } from "../util/filesystem"
// The catalog loader below tries to import a bundled snapshot (generated at
// build time) and falls back to undefined in dev mode, where the snapshot
// file does not exist; see ModelsDev.Data.
/* @ts-ignore */
export namespace ModelsDev {
  const log = Log.create({ service: "models.dev" })
  // On-disk cache file for the fetched catalog.
  const filepath = path.join(Global.Path.cache, "models.json")
  /** Schema for one model entry in the models.dev catalog. */
  export const Model = z.object({
    id: z.string(),
    name: z.string(),
    family: z.string().optional(),
    release_date: z.string(),
    attachment: z.boolean(),
    reasoning: z.boolean(),
    temperature: z.boolean(),
    tool_call: z.boolean(),
    // Interleaved-reasoning support: `true`, or an object naming the
    // response field the reasoning arrives in.
    interleaved: z
      .union([
        z.literal(true),
        z
          .object({
            field: z.enum(["reasoning_content", "reasoning_details"]),
          })
          .strict(),
      ])
      .optional(),
    // Per-token pricing; `context_over_200k` holds the alternate rates that
    // apply past a 200k-token context (presumably — confirm against models.dev).
    cost: z
      .object({
        input: z.number(),
        output: z.number(),
        cache_read: z.number().optional(),
        cache_write: z.number().optional(),
        context_over_200k: z
          .object({
            input: z.number(),
            output: z.number(),
            cache_read: z.number().optional(),
            cache_write: z.number().optional(),
          })
          .optional(),
      })
      .optional(),
    // Token limits: context window, optional input cap, output cap.
    limit: z.object({
      context: z.number(),
      input: z.number().optional(),
      output: z.number(),
    }),
    modalities: z
      .object({
        input: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
        output: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
      })
      .optional(),
    experimental: z.boolean().optional(),
    status: z.enum(["alpha", "beta", "deprecated"]).optional(),
    options: z.record(z.string(), z.any()),
    headers: z.record(z.string(), z.string()).optional(),
    provider: z.object({ npm: z.string().optional(), api: z.string().optional() }).optional(),
    variants: z.record(z.string(), z.record(z.string(), z.any())).optional(),
  })
  export type Model = z.infer<typeof Model>
  /** Schema for a provider entry (with its models keyed by id). */
  export const Provider = z.object({
    api: z.string().optional(),
    name: z.string(),
    env: z.array(z.string()),
    id: z.string(),
    npm: z.string().optional(),
    models: z.record(z.string(), Model),
  })
  export type Provider = z.infer<typeof Provider>
  // Catalog host; overridable via flag (mirrors, air-gapped setups).
  function url() {
    return Flag.OPENCODE_MODELS_URL || "https://models.dev"
  }
  // Lazily-resolved catalog data. Resolution order:
  //   1. local JSON cache file (or the explicit OPENCODE_MODELS_PATH)
  //   2. bundled build-time snapshot (absent in dev mode)
  //   3. live fetch from models.dev — returns {} instead when fetch is disabled
  export const Data = lazy(async () => {
    const result = await Filesystem.readJson(Flag.OPENCODE_MODELS_PATH ?? filepath).catch(() => {})
    if (result) return result
    // @ts-ignore
    const snapshot = await import("./models-snapshot")
      .then((m) => m.snapshot as Record<string, unknown>)
      .catch(() => undefined)
    if (snapshot) return snapshot
    if (Flag.OPENCODE_DISABLE_MODELS_FETCH) return {}
    const json = await fetch(`${url()}/api.json`).then((x) => x.text())
    return JSON.parse(json)
  })
  /** The full catalog, keyed by provider id. */
  export async function get() {
    const result = await Data()
    return result as Record<string, Provider>
  }
  // Best-effort refresh: fetch the catalog (10s timeout), persist it to the
  // cache file, and reset the lazy Data so the next get() re-reads it.
  // Network failures are logged and swallowed.
  export async function refresh() {
    const result = await fetch(`${url()}/api.json`, {
      headers: {
        "User-Agent": Installation.USER_AGENT,
      },
      signal: AbortSignal.timeout(10 * 1000),
    }).catch((e) => {
      log.error("Failed to fetch models.dev", {
        error: e,
      })
    })
    if (result && result.ok) {
      await Filesystem.write(filepath, await result.text())
      ModelsDev.Data.reset()
    }
  }
}
// Kick off a background catalog refresh on module load and then hourly.
// Skipped when fetching is disabled or when yargs is computing shell
// completions (keeps completion fast and offline).
if (!Flag.OPENCODE_DISABLE_MODELS_FETCH && !process.argv.includes("--get-yargs-completions")) {
  ModelsDev.refresh()
  setInterval(
    async () => {
      await ModelsDev.refresh()
    },
    60 * 1000 * 60,
  ).unref() // unref() so this timer does not keep the process alive
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,38 @@
import { Schema } from "effect"
import z from "zod"
import { withStatics } from "@/util/schema"
// Branded string for provider identifiers.
const ProviderIdBase = Schema.String.pipe(Schema.brand("ProviderID"))
export type ProviderID = typeof ProviderIdBase.Type
// Runtime schema augmented with static helpers: a constructor, a zod bridge
// for zod-typed surfaces, and constants for well-known providers.
export const ProviderID = ProviderIdBase.pipe(
  withStatics((s: typeof ProviderIdBase) => ({
    make: (id: string) => s.makeUnsafe(id),
    zod: z.string().pipe(z.custom<ProviderID>()),
    // Well-known providers
    opencode: s.makeUnsafe("opencode"),
    anthropic: s.makeUnsafe("anthropic"),
    openai: s.makeUnsafe("openai"),
    google: s.makeUnsafe("google"),
    googleVertex: s.makeUnsafe("google-vertex"),
    githubCopilot: s.makeUnsafe("github-copilot"),
    amazonBedrock: s.makeUnsafe("amazon-bedrock"),
    azure: s.makeUnsafe("azure"),
    openrouter: s.makeUnsafe("openrouter"),
    mistral: s.makeUnsafe("mistral"),
    gitlab: s.makeUnsafe("gitlab"),
  })),
)
// Branded string for model identifiers, mirroring ProviderID.
const ModelIdBase = Schema.String.pipe(Schema.brand("ModelID"))
export type ModelID = typeof ModelIdBase.Type
// Runtime schema plus a constructor and a zod bridge.
export const ModelID = ModelIdBase.pipe(
  withStatics((s: typeof ModelIdBase) => ({
    make: (id: string) => s.makeUnsafe(id),
    zod: z.string().pipe(z.custom<ModelID>()),
  })),
)

View File

@@ -0,0 +1,5 @@
This is a temporary package used primarily for GitHub Copilot compatibility.
Avoid making changes to these files unless you only want to affect the Copilot provider.
Also, this should ONLY be used for the Copilot provider.

View File

@@ -0,0 +1,164 @@
import {
type LanguageModelV2Prompt,
type SharedV2ProviderMetadata,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types"
import { convertToBase64 } from "@ai-sdk/provider-utils"
// Pull Copilot-specific provider metadata off a prompt message or part;
// returns an empty object when absent so callers can spread unconditionally.
function getOpenAIMetadata(message: { providerOptions?: SharedV2ProviderMetadata }) {
  const copilot = message?.providerOptions?.copilot
  return copilot ?? {}
}
/**
 * Convert an AI SDK LanguageModelV2Prompt into the OpenAI-compatible chat
 * message array used by the Copilot provider.
 *
 * Copilot-specific details:
 * - provider metadata is read from `providerOptions.copilot` and spread
 *   onto the emitted messages/parts
 * - assistant reasoning is forwarded via `reasoning_text` /
 *   `reasoning_opaque`; the opaque token may be attached to any part
 *
 * @throws UnsupportedFunctionalityError for non-image file parts
 * @throws Error for unknown roles (exhaustiveness guard)
 */
export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt): OpenAICompatibleChatPrompt {
  const messages: OpenAICompatibleChatPrompt = []
  for (const { role, content, ...message } of prompt) {
    const metadata = getOpenAIMetadata({ ...message })
    switch (role) {
      case "system": {
        messages.push({
          role: "system",
          content: content,
          ...metadata,
        })
        break
      }
      case "user": {
        // Collapse a single text part into a plain string (compact form).
        if (content.length === 1 && content[0].type === "text") {
          messages.push({
            role: "user",
            content: content[0].text,
            ...getOpenAIMetadata(content[0]),
          })
          break
        }
        messages.push({
          role: "user",
          content: content.map((part) => {
            const partMetadata = getOpenAIMetadata(part)
            switch (part.type) {
              case "text": {
                return { type: "text", text: part.text, ...partMetadata }
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // Normalize the wildcard media type to a concrete default.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
                  return {
                    type: "image_url",
                    image_url: {
                      url:
                        part.data instanceof URL
                          ? part.data.toString()
                          : `data:${mediaType};base64,${convertToBase64(part.data)}`,
                    },
                    ...partMetadata,
                  }
                } else {
                  // Only images are supported as file parts here.
                  throw new UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`,
                  })
                }
              }
            }
          }),
          ...metadata,
        })
        break
      }
      case "assistant": {
        let text = ""
        let reasoningText: string | undefined
        let reasoningOpaque: string | undefined
        const toolCalls: Array<{
          id: string
          type: "function"
          function: { name: string; arguments: string }
        }> = []
        for (const part of content) {
          const partMetadata = getOpenAIMetadata(part)
          // Check for reasoningOpaque on any part (may be attached to text/tool-call)
          const partOpaque = (part.providerOptions as { copilot?: { reasoningOpaque?: string } })?.copilot
            ?.reasoningOpaque
          // First opaque token found wins.
          if (partOpaque && !reasoningOpaque) {
            reasoningOpaque = partOpaque
          }
          switch (part.type) {
            case "text": {
              text += part.text
              break
            }
            case "reasoning": {
              // Last non-empty reasoning part wins.
              if (part.text) reasoningText = part.text
              break
            }
            case "tool-call": {
              toolCalls.push({
                id: part.toolCallId,
                type: "function",
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input),
                },
                ...partMetadata,
              })
              break
            }
          }
        }
        messages.push({
          role: "assistant",
          content: text || null,
          tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
          // reasoning_text is only forwarded together with the opaque token
          // (presumably required as a pair for multi-turn reasoning — confirm)
          reasoning_text: reasoningOpaque ? reasoningText : undefined,
          reasoning_opaque: reasoningOpaque,
          ...metadata,
        })
        break
      }
      case "tool": {
        for (const toolResponse of content) {
          const output = toolResponse.output
          let contentValue: string
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value
              break
            case "content":
            case "json":
            case "error-json":
              // Structured outputs are serialized to a JSON string.
              contentValue = JSON.stringify(output.value)
              break
          }
          const toolResponseMetadata = getOpenAIMetadata(toolResponse)
          messages.push({
            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: contentValue,
            ...toolResponseMetadata,
          })
        }
        break
      }
      default: {
        // Exhaustiveness guard: compile error if a new role appears upstream.
        const _exhaustiveCheck: never = role
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`)
      }
    }
  }
  return messages
}

View File

@@ -0,0 +1,15 @@
/**
 * Normalize the id/model/created fields of an OpenAI-style response into
 * the AI SDK response-metadata shape. `created` is a unix timestamp in
 * seconds and becomes a Date; null values are coerced to undefined.
 */
export function getResponseMetadata({
  id,
  model,
  created,
}: {
  id?: string | undefined | null
  created?: number | undefined | null
  model?: string | undefined | null
}) {
  const timestamp = created == null ? undefined : new Date(created * 1000)
  return {
    id: id ?? undefined,
    modelId: model ?? undefined,
    timestamp,
  }
}

View File

@@ -0,0 +1,17 @@
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
/**
 * Map an OpenAI-compatible `finish_reason` string onto the AI SDK finish
 * reason. Unrecognized, null, or missing values map to "unknown".
 */
export function mapOpenAICompatibleFinishReason(finishReason: string | null | undefined): LanguageModelV2FinishReason {
  if (finishReason == null) return "unknown"
  const mapping: Record<string, LanguageModelV2FinishReason> = {
    stop: "stop",
    length: "length",
    content_filter: "content-filter",
    // Both legacy function calls and modern tool calls map to tool-calls.
    function_call: "tool-calls",
    tool_calls: "tool-calls",
  }
  return mapping[finishReason] ?? "unknown"
}

View File

@@ -0,0 +1,64 @@
import type { JSONValue } from "@ai-sdk/provider"
/** Full chat prompt: ordered list of OpenAI-compatible messages. */
export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>
/** Union of the message roles accepted by the chat/completions endpoint. */
export type OpenAICompatibleMessage =
  | OpenAICompatibleSystemMessage
  | OpenAICompatibleUserMessage
  | OpenAICompatibleAssistantMessage
  | OpenAICompatibleToolMessage
// Allow for arbitrary additional properties for general purpose
// provider-metadata-specific extensibility.
type JsonRecord<T = never> = Record<string, JSONValue | JSONValue[] | T | T[] | undefined>
export interface OpenAICompatibleSystemMessage extends JsonRecord<OpenAICompatibleSystemContentPart> {
  role: "system"
  content: string | Array<OpenAICompatibleSystemContentPart>
}
export interface OpenAICompatibleSystemContentPart extends JsonRecord {
  type: "text"
  text: string
}
export interface OpenAICompatibleUserMessage extends JsonRecord<OpenAICompatibleContentPart> {
  role: "user"
  content: string | Array<OpenAICompatibleContentPart>
}
/** User content parts: plain text or an image reference. */
export type OpenAICompatibleContentPart = OpenAICompatibleContentPartText | OpenAICompatibleContentPartImage
export interface OpenAICompatibleContentPartImage extends JsonRecord {
  type: "image_url"
  // `url` may be an http(s) URL or a base64 `data:` URI.
  image_url: { url: string }
}
export interface OpenAICompatibleContentPartText extends JsonRecord {
  type: "text"
  text: string
}
export interface OpenAICompatibleAssistantMessage extends JsonRecord<OpenAICompatibleMessageToolCall> {
  role: "assistant"
  content?: string | null
  tool_calls?: Array<OpenAICompatibleMessageToolCall>
  // Copilot-specific reasoning fields
  reasoning_text?: string
  reasoning_opaque?: string
}
export interface OpenAICompatibleMessageToolCall extends JsonRecord {
  type: "function"
  id: string
  function: {
    // JSON-encoded arguments, per the OpenAI tool-call wire format.
    arguments: string
    name: string
  }
}
export interface OpenAICompatibleToolMessage extends JsonRecord {
  role: "tool"
  content: string
  tool_call_id: string
}

View File

@@ -0,0 +1,780 @@
import {
APICallError,
InvalidResponseDataError,
type LanguageModelV2,
type LanguageModelV2CallWarning,
type LanguageModelV2Content,
type LanguageModelV2FinishReason,
type LanguageModelV2StreamPart,
type SharedV2ProviderMetadata,
} from "@ai-sdk/provider"
import {
combineHeaders,
createEventSourceResponseHandler,
createJsonErrorResponseHandler,
createJsonResponseHandler,
type FetchFunction,
generateId,
isParsableJson,
parseProviderOptions,
type ParseResult,
postJsonToApi,
type ResponseHandler,
} from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages"
import { getResponseMetadata } from "./get-response-metadata"
import { mapOpenAICompatibleFinishReason } from "./map-openai-compatible-finish-reason"
import { type OpenAICompatibleChatModelId, openaiCompatibleProviderOptions } from "./openai-compatible-chat-options"
import { defaultOpenAICompatibleErrorStructure, type ProviderErrorStructure } from "../openai-compatible-error"
import type { MetadataExtractor } from "./openai-compatible-metadata-extractor"
import { prepareTools } from "./openai-compatible-prepare-tools"
/** Configuration for the OpenAI-compatible chat language model. */
export type OpenAICompatibleChatConfig = {
  // Provider identifier; the segment before the first "." names the
  // provider-options namespace.
  provider: string
  // Request headers factory; entries with undefined values may be present.
  headers: () => Record<string, string | undefined>
  // Builds the request URL for a given model id and endpoint path.
  url: (options: { modelId: string; path: string }) => string
  // Optional custom fetch implementation (proxies, testing).
  fetch?: FetchFunction
  // When true, streaming requests send stream_options.include_usage.
  includeUsage?: boolean
  // Shape of provider error responses; defaults to the OpenAI structure.
  errorStructure?: ProviderErrorStructure<any>
  // Optional extractor for provider-specific response metadata.
  metadataExtractor?: MetadataExtractor
  /**
   * Whether the model supports structured outputs.
   */
  supportsStructuredOutputs?: boolean
  /**
   * The supported URLs for the model.
   */
  supportedUrls?: () => LanguageModelV2["supportedUrls"]
}
export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2"
readonly supportsStructuredOutputs: boolean
readonly modelId: OpenAICompatibleChatModelId
private readonly config: OpenAICompatibleChatConfig
private readonly failedResponseHandler: ResponseHandler<APICallError>
private readonly chunkSchema // type inferred via constructor
constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig) {
this.modelId = modelId
this.config = config
// initialize error handling:
const errorStructure = config.errorStructure ?? defaultOpenAICompatibleErrorStructure
this.chunkSchema = createOpenAICompatibleChatChunkSchema(errorStructure.errorSchema)
this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure)
this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false
}
get provider(): string {
return this.config.provider
}
private get providerOptionsName(): string {
return this.config.provider.split(".")[0].trim()
}
get supportedUrls() {
return this.config.supportedUrls?.() ?? {}
}
private async getArgs({
prompt,
maxOutputTokens,
temperature,
topP,
topK,
frequencyPenalty,
presencePenalty,
providerOptions,
stopSequences,
responseFormat,
seed,
toolChoice,
tools,
}: Parameters<LanguageModelV2["doGenerate"]>[0]) {
const warnings: LanguageModelV2CallWarning[] = []
// Parse provider options
const compatibleOptions = Object.assign(
(await parseProviderOptions({
provider: "copilot",
providerOptions,
schema: openaiCompatibleProviderOptions,
})) ?? {},
(await parseProviderOptions({
provider: this.providerOptionsName,
providerOptions,
schema: openaiCompatibleProviderOptions,
})) ?? {},
)
if (topK != null) {
warnings.push({ type: "unsupported-setting", setting: "topK" })
}
if (responseFormat?.type === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
warnings.push({
type: "unsupported-setting",
setting: "responseFormat",
details: "JSON response format schema is only supported with structuredOutputs",
})
}
const {
tools: openaiTools,
toolChoice: openaiToolChoice,
toolWarnings,
} = prepareTools({
tools,
toolChoice,
})
return {
args: {
// model id:
model: this.modelId,
// model specific settings:
user: compatibleOptions.user,
// standardized settings:
max_tokens: maxOutputTokens,
temperature,
top_p: topP,
frequency_penalty: frequencyPenalty,
presence_penalty: presencePenalty,
response_format:
responseFormat?.type === "json"
? this.supportsStructuredOutputs === true && responseFormat.schema != null
? {
type: "json_schema",
json_schema: {
schema: responseFormat.schema,
name: responseFormat.name ?? "response",
description: responseFormat.description,
},
}
: { type: "json_object" }
: undefined,
stop: stopSequences,
seed,
...Object.fromEntries(
Object.entries(providerOptions?.[this.providerOptionsName] ?? {}).filter(
([key]) => !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
),
),
reasoning_effort: compatibleOptions.reasoningEffort,
verbosity: compatibleOptions.textVerbosity,
// messages:
messages: convertToOpenAICompatibleChatMessages(prompt),
// tools:
tools: openaiTools,
tool_choice: openaiToolChoice,
// thinking_budget
thinking_budget: compatibleOptions.thinking_budget,
},
warnings: [...warnings, ...toolWarnings],
}
}
async doGenerate(
options: Parameters<LanguageModelV2["doGenerate"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
const { args, warnings } = await this.getArgs({ ...options })
const body = JSON.stringify(args)
const {
responseHeaders,
value: responseBody,
rawValue: rawResponse,
} = await postJsonToApi({
url: this.config.url({
path: "/chat/completions",
modelId: this.modelId,
}),
headers: combineHeaders(this.config.headers(), options.headers),
body: args,
failedResponseHandler: this.failedResponseHandler,
successfulResponseHandler: createJsonResponseHandler(OpenAICompatibleChatResponseSchema),
abortSignal: options.abortSignal,
fetch: this.config.fetch,
})
const choice = responseBody.choices[0]
const content: Array<LanguageModelV2Content> = []
// text content:
const text = choice.message.content
if (text != null && text.length > 0) {
content.push({
type: "text",
text,
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
})
}
// reasoning content (Copilot uses reasoning_text):
const reasoning = choice.message.reasoning_text
if (reasoning != null && reasoning.length > 0) {
content.push({
type: "reasoning",
text: reasoning,
// Include reasoning_opaque for Copilot multi-turn reasoning
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
})
}
// tool calls:
if (choice.message.tool_calls != null) {
for (const toolCall of choice.message.tool_calls) {
content.push({
type: "tool-call",
toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name,
input: toolCall.function.arguments!,
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
})
}
}
// provider metadata:
const providerMetadata: SharedV2ProviderMetadata = {
[this.providerOptionsName]: {},
...(await this.config.metadataExtractor?.extractMetadata?.({
parsedBody: rawResponse,
})),
}
const completionTokenDetails = responseBody.usage?.completion_tokens_details
if (completionTokenDetails?.accepted_prediction_tokens != null) {
providerMetadata[this.providerOptionsName].acceptedPredictionTokens =
completionTokenDetails?.accepted_prediction_tokens
}
if (completionTokenDetails?.rejected_prediction_tokens != null) {
providerMetadata[this.providerOptionsName].rejectedPredictionTokens =
completionTokenDetails?.rejected_prediction_tokens
}
return {
content,
finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
usage: {
inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
outputTokens: responseBody.usage?.completion_tokens ?? undefined,
totalTokens: responseBody.usage?.total_tokens ?? undefined,
reasoningTokens: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
cachedInputTokens: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
},
providerMetadata,
request: { body },
response: {
...getResponseMetadata(responseBody),
headers: responseHeaders,
body: rawResponse,
},
warnings,
}
}
  /**
   * Streams a chat completion and adapts the provider's SSE chunks into
   * LanguageModelV2 stream parts (reasoning, text, incremental tool-call
   * input, usage accounting, and a final `finish` event).
   *
   * Cross-chunk state (partial tool calls keyed by the provider's `index`,
   * usage totals, the Copilot `reasoning_opaque` token, active
   * text/reasoning flags) lives in this closure, so each call produces an
   * independent stream that must not be consumed more than once.
   */
  async doStream(
    options: Parameters<LanguageModelV2["doStream"]>[0],
  ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
    const { args, warnings } = await this.getArgs({ ...options })
    const body = {
      ...args,
      stream: true,
      // only include stream_options when in strict compatibility mode:
      stream_options: this.config.includeUsage ? { include_usage: true } : undefined,
    }
    const metadataExtractor = this.config.metadataExtractor?.createStreamExtractor()
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: this.failedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(this.chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    })
    // Tool calls accumulated across chunks, indexed by the provider's
    // `index` field; `hasFinished` marks calls that were already emitted.
    const toolCalls: Array<{
      id: string
      type: "function"
      function: {
        name: string
        arguments: string
      }
      hasFinished: boolean
    }> = []
    let finishReason: LanguageModelV2FinishReason = "unknown"
    // Usage accumulator; fields stay undefined until the provider reports
    // them (typically in the final chunk when include_usage is set).
    const usage: {
      completionTokens: number | undefined
      completionTokensDetails: {
        reasoningTokens: number | undefined
        acceptedPredictionTokens: number | undefined
        rejectedPredictionTokens: number | undefined
      }
      promptTokens: number | undefined
      promptTokensDetails: {
        cachedTokens: number | undefined
      }
      totalTokens: number | undefined
    } = {
      completionTokens: undefined,
      completionTokensDetails: {
        reasoningTokens: undefined,
        acceptedPredictionTokens: undefined,
        rejectedPredictionTokens: undefined,
      },
      promptTokens: undefined,
      promptTokensDetails: {
        cachedTokens: undefined,
      },
      totalTokens: undefined,
    }
    let isFirstChunk = true
    // captured here because `this` is not the model instance inside the
    // TransformStream callbacks below
    const providerOptionsName = this.providerOptionsName
    let isActiveReasoning = false
    let isActiveText = false
    // Copilot multi-turn reasoning token, echoed back via providerMetadata
    let reasoningOpaque: string | undefined
    return {
      stream: response.pipeThrough(
        new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV2StreamPart>({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings })
          },
          // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
          transform(chunk, controller) {
            // Emit raw chunk if requested (before anything else)
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue })
            }
            // handle failed chunk parsing / validation:
            if (!chunk.success) {
              finishReason = "error"
              controller.enqueue({ type: "error", error: chunk.error })
              return
            }
            const value = chunk.value
            metadataExtractor?.processChunk(chunk.rawValue)
            // handle error chunks:
            if ("error" in value) {
              finishReason = "error"
              controller.enqueue({ type: "error", error: value.error.message })
              return
            }
            if (isFirstChunk) {
              isFirstChunk = false
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value),
              })
            }
            if (value.usage != null) {
              const {
                prompt_tokens,
                completion_tokens,
                total_tokens,
                prompt_tokens_details,
                completion_tokens_details,
              } = value.usage
              usage.promptTokens = prompt_tokens ?? undefined
              usage.completionTokens = completion_tokens ?? undefined
              usage.totalTokens = total_tokens ?? undefined
              if (completion_tokens_details?.reasoning_tokens != null) {
                usage.completionTokensDetails.reasoningTokens = completion_tokens_details?.reasoning_tokens
              }
              if (completion_tokens_details?.accepted_prediction_tokens != null) {
                usage.completionTokensDetails.acceptedPredictionTokens =
                  completion_tokens_details?.accepted_prediction_tokens
              }
              if (completion_tokens_details?.rejected_prediction_tokens != null) {
                usage.completionTokensDetails.rejectedPredictionTokens =
                  completion_tokens_details?.rejected_prediction_tokens
              }
              if (prompt_tokens_details?.cached_tokens != null) {
                usage.promptTokensDetails.cachedTokens = prompt_tokens_details?.cached_tokens
              }
            }
            const choice = value.choices[0]
            if (choice?.finish_reason != null) {
              finishReason = mapOpenAICompatibleFinishReason(choice.finish_reason)
            }
            if (choice?.delta == null) {
              return
            }
            const delta = choice.delta
            // Capture reasoning_opaque for Copilot multi-turn reasoning
            if (delta.reasoning_opaque) {
              if (reasoningOpaque != null) {
                throw new InvalidResponseDataError({
                  data: delta,
                  message:
                    "Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.",
                })
              }
              reasoningOpaque = delta.reasoning_opaque
            }
            // enqueue reasoning before text deltas (Copilot uses reasoning_text):
            const reasoningContent = delta.reasoning_text
            if (reasoningContent) {
              if (!isActiveReasoning) {
                controller.enqueue({
                  type: "reasoning-start",
                  id: "reasoning-0",
                })
                isActiveReasoning = true
              }
              controller.enqueue({
                type: "reasoning-delta",
                id: "reasoning-0",
                delta: reasoningContent,
              })
            }
            if (delta.content) {
              // If reasoning was active and we're starting text, end reasoning first
              // This handles the case where reasoning_opaque and content come in the same chunk
              if (isActiveReasoning && !isActiveText) {
                controller.enqueue({
                  type: "reasoning-end",
                  id: "reasoning-0",
                  providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
                })
                isActiveReasoning = false
              }
              if (!isActiveText) {
                controller.enqueue({
                  type: "text-start",
                  id: "txt-0",
                  providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
                })
                isActiveText = true
              }
              controller.enqueue({
                type: "text-delta",
                id: "txt-0",
                delta: delta.content,
              })
            }
            if (delta.tool_calls != null) {
              // If reasoning was active and we're starting tool calls, end reasoning first
              // This handles the case where reasoning goes directly to tool calls with no content
              if (isActiveReasoning) {
                controller.enqueue({
                  type: "reasoning-end",
                  id: "reasoning-0",
                  providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
                })
                isActiveReasoning = false
              }
              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index
                // first fragment for this index: must carry an id and a name
                if (toolCalls[index] == null) {
                  if (toolCallDelta.id == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`,
                    })
                  }
                  if (toolCallDelta.function?.name == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`,
                    })
                  }
                  controller.enqueue({
                    type: "tool-input-start",
                    id: toolCallDelta.id,
                    toolName: toolCallDelta.function.name,
                  })
                  toolCalls[index] = {
                    id: toolCallDelta.id,
                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
                      arguments: toolCallDelta.function.arguments ?? "",
                    },
                    hasFinished: false,
                  }
                  const toolCall = toolCalls[index]
                  if (toolCall.function?.name != null && toolCall.function?.arguments != null) {
                    // send delta if the argument text has already started:
                    if (toolCall.function.arguments.length > 0) {
                      controller.enqueue({
                        type: "tool-input-delta",
                        id: toolCall.id,
                        delta: toolCall.function.arguments,
                      })
                    }
                    // check if tool call is complete
                    // (some providers send the full tool call in one chunk):
                    if (isParsableJson(toolCall.function.arguments)) {
                      controller.enqueue({
                        type: "tool-input-end",
                        id: toolCall.id,
                      })
                      controller.enqueue({
                        type: "tool-call",
                        toolCallId: toolCall.id ?? generateId(),
                        toolName: toolCall.function.name,
                        input: toolCall.function.arguments,
                        providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
                      })
                      toolCall.hasFinished = true
                    }
                  }
                  continue
                }
                // existing tool call, merge if not finished
                const toolCall = toolCalls[index]
                if (toolCall.hasFinished) {
                  continue
                }
                if (toolCallDelta.function?.arguments != null) {
                  toolCall.function!.arguments += toolCallDelta.function?.arguments ?? ""
                }
                // send delta
                controller.enqueue({
                  type: "tool-input-delta",
                  id: toolCall.id,
                  delta: toolCallDelta.function.arguments ?? "",
                })
                // check if tool call is complete
                if (
                  toolCall.function?.name != null &&
                  toolCall.function?.arguments != null &&
                  isParsableJson(toolCall.function.arguments)
                ) {
                  controller.enqueue({
                    type: "tool-input-end",
                    id: toolCall.id,
                  })
                  controller.enqueue({
                    type: "tool-call",
                    toolCallId: toolCall.id ?? generateId(),
                    toolName: toolCall.function.name,
                    input: toolCall.function.arguments,
                    providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
                  })
                  toolCall.hasFinished = true
                }
              }
            }
          },
          flush(controller) {
            // close any parts that were still open when the stream ended
            if (isActiveReasoning) {
              controller.enqueue({
                type: "reasoning-end",
                id: "reasoning-0",
                // Include reasoning_opaque for Copilot multi-turn reasoning
                providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
              })
            }
            if (isActiveText) {
              controller.enqueue({ type: "text-end", id: "txt-0" })
            }
            // go through all tool calls and send the ones that are not finished
            for (const toolCall of toolCalls.filter((toolCall) => !toolCall.hasFinished)) {
              controller.enqueue({
                type: "tool-input-end",
                id: toolCall.id,
              })
              controller.enqueue({
                type: "tool-call",
                toolCallId: toolCall.id ?? generateId(),
                toolName: toolCall.function.name,
                input: toolCall.function.arguments,
              })
            }
            const providerMetadata: SharedV2ProviderMetadata = {
              [providerOptionsName]: {},
              // Include reasoning_opaque for Copilot multi-turn reasoning
              ...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),
              ...metadataExtractor?.buildMetadata(),
            }
            if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
              providerMetadata[providerOptionsName].acceptedPredictionTokens =
                usage.completionTokensDetails.acceptedPredictionTokens
            }
            if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
              providerMetadata[providerOptionsName].rejectedPredictionTokens =
                usage.completionTokensDetails.rejectedPredictionTokens
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage: {
                inputTokens: usage.promptTokens ?? undefined,
                outputTokens: usage.completionTokens ?? undefined,
                totalTokens: usage.totalTokens ?? undefined,
                reasoningTokens: usage.completionTokensDetails.reasoningTokens ?? undefined,
                cachedInputTokens: usage.promptTokensDetails.cachedTokens ?? undefined,
              },
              providerMetadata,
            })
          },
        }),
      ),
      request: { body },
      response: { headers: responseHeaders },
    }
  }
}
// Token-usage block shared by the non-streaming response and stream chunks.
// Everything is nullish because OpenAI-compatible providers report token
// accounting inconsistently (or not at all).
const nullishCount = z.number().nullish()
const promptTokenDetailsSchema = z
  .object({
    cached_tokens: nullishCount,
  })
  .nullish()
const completionTokenDetailsSchema = z
  .object({
    reasoning_tokens: nullishCount,
    accepted_prediction_tokens: nullishCount,
    rejected_prediction_tokens: nullishCount,
  })
  .nullish()
const openaiCompatibleTokenUsageSchema = z
  .object({
    prompt_tokens: nullishCount,
    completion_tokens: nullishCount,
    total_tokens: nullishCount,
    prompt_tokens_details: promptTokenDetailsSchema,
    completion_tokens_details: completionTokenDetailsSchema,
  })
  .nullish()
// Tool call as it appears in a complete (non-streaming) assistant message.
const chatResponseToolCallSchema = z.object({
  id: z.string().nullish(),
  function: z.object({
    name: z.string(),
    arguments: z.string(),
  }),
})
// Assistant message of a complete response, including the Copilot-specific
// reasoning_text / reasoning_opaque extensions.
const chatResponseMessageSchema = z.object({
  role: z.literal("assistant").nullish(),
  content: z.string().nullish(),
  // Copilot-specific reasoning fields
  reasoning_text: z.string().nullish(),
  reasoning_opaque: z.string().nullish(),
  tool_calls: z.array(chatResponseToolCallSchema).nullish(),
})
// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const OpenAICompatibleChatResponseSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      message: chatResponseMessageSchema,
      finish_reason: z.string().nullish(),
    }),
  ),
  usage: openaiCompatibleTokenUsageSchema,
})
// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const createOpenAICompatibleChatChunkSchema = <ERROR_SCHEMA extends z.core.$ZodType>(errorSchema: ERROR_SCHEMA) => {
  // Streaming tool-call fragment: `index` correlates fragments of the same
  // call across chunks; name/arguments may arrive piecemeal.
  const toolCallDeltaSchema = z.object({
    index: z.number(),
    id: z.string().nullish(),
    function: z.object({
      name: z.string().nullish(),
      arguments: z.string().nullish(),
    }),
  })
  // Incremental message delta, including the Copilot reasoning extensions.
  const deltaSchema = z
    .object({
      role: z.enum(["assistant"]).nullish(),
      content: z.string().nullish(),
      // Copilot-specific reasoning fields
      reasoning_text: z.string().nullish(),
      reasoning_opaque: z.string().nullish(),
      tool_calls: z.array(toolCallDeltaSchema).nullish(),
    })
    .nullish()
  // A chunk is either a regular SSE payload or a provider error object.
  return z.union([
    z.object({
      id: z.string().nullish(),
      created: z.number().nullish(),
      model: z.string().nullish(),
      choices: z.array(
        z.object({
          delta: deltaSchema,
          finish_reason: z.string().nullish(),
        }),
      ),
      usage: openaiCompatibleTokenUsageSchema,
    }),
    errorSchema,
  ])
}

View File

@@ -0,0 +1,28 @@
import { z } from "zod/v4"
// Free-form model identifier: OpenAI-compatible endpoints accept arbitrary
// model names, so no literal union is enforced here.
export type OpenAICompatibleChatModelId = string
// Per-call provider options accepted under the provider options key.
export const openaiCompatibleProviderOptions = z.object({
  /**
   * A unique identifier representing your end-user, which can help the provider to
   * monitor and detect abuse.
   */
  user: z.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: z.string().optional(),
  /**
   * Controls the verbosity of the generated text. Defaults to `medium`.
   */
  textVerbosity: z.string().optional(),
  /**
   * Copilot thinking_budget used for Anthropic models.
   * NOTE(review): intentionally snake_case — presumably forwarded verbatim
   * in the request body; confirm before renaming to camelCase.
   */
  thinking_budget: z.number().optional(),
})
export type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>

View File

@@ -0,0 +1,44 @@
import type { SharedV2ProviderMetadata } from "@ai-sdk/provider"
/**
Extracts provider-specific metadata from API responses.
Used to standardize metadata handling across different LLM providers while allowing
provider-specific metadata to be captured.
*/
export type MetadataExtractor = {
  /**
   * Extracts provider metadata from a complete, non-streaming response.
   *
   * @param parsedBody - The parsed response JSON body from the provider's API.
   *
   * @returns Provider-specific metadata or undefined if no metadata is available.
   * The metadata should be under a key indicating the provider id.
   */
  extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV2ProviderMetadata | undefined>
  /**
   * Creates an extractor for handling streaming responses. The returned object provides
   * methods to process individual chunks and build the final metadata from the accumulated
   * stream data.
   *
   * A fresh extractor should be created per streamed response, since it
   * accumulates state across chunks.
   *
   * @returns An object with methods to process chunks and build metadata from a stream
   */
  createStreamExtractor: () => {
    /**
     * Process an individual chunk from the stream. Called for each chunk in the response stream
     * to accumulate metadata throughout the streaming process.
     *
     * @param parsedChunk - The parsed JSON response chunk from the provider's API
     */
    processChunk(parsedChunk: unknown): void
    /**
     * Builds the metadata object after all chunks have been processed.
     * Called at the end of the stream to generate the complete provider metadata.
     *
     * @returns Provider-specific metadata or undefined if no metadata is available.
     * The metadata should be under a key indicating the provider id.
     */
    buildMetadata(): SharedV2ProviderMetadata | undefined
  }
}

View File

@@ -0,0 +1,87 @@
import {
type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
export function prepareTools({
tools,
toolChoice,
}: {
tools: LanguageModelV2CallOptions["tools"]
toolChoice?: LanguageModelV2CallOptions["toolChoice"]
}): {
tools:
| undefined
| Array<{
type: "function"
function: {
name: string
description: string | undefined
parameters: unknown
}
}>
toolChoice: { type: "function"; function: { name: string } } | "auto" | "none" | "required" | undefined
toolWarnings: LanguageModelV2CallWarning[]
} {
// when the tools array is empty, change it to undefined to prevent errors:
tools = tools?.length ? tools : undefined
const toolWarnings: LanguageModelV2CallWarning[] = []
if (tools == null) {
return { tools: undefined, toolChoice: undefined, toolWarnings }
}
const openaiCompatTools: Array<{
type: "function"
function: {
name: string
description: string | undefined
parameters: unknown
}
}> = []
for (const tool of tools) {
if (tool.type === "provider-defined") {
toolWarnings.push({ type: "unsupported-tool", tool })
} else {
openaiCompatTools.push({
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: tool.inputSchema,
},
})
}
}
if (toolChoice == null) {
return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings }
}
const type = toolChoice.type
switch (type) {
case "auto":
case "none":
case "required":
return { tools: openaiCompatTools, toolChoice: type, toolWarnings }
case "tool":
return {
tools: openaiCompatTools,
toolChoice: {
type: "function",
function: { name: toolChoice.toolName },
},
toolWarnings,
}
default: {
const _exhaustiveCheck: never = type
throw new UnsupportedFunctionalityError({
functionality: `tool choice type: ${_exhaustiveCheck}`,
})
}
}
}

View File

@@ -0,0 +1,100 @@
import type { LanguageModelV2 } from "@ai-sdk/provider"
import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
import { OpenAICompatibleChatLanguageModel } from "./chat/openai-compatible-chat-language-model"
import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"
// Import the version or define it
// NOTE(review): hard-coded; ideally sourced from package.json at build time.
const VERSION = "0.1.0"
// Free-form model identifier; OpenAI-compatible endpoints accept arbitrary names.
export type OpenaiCompatibleModelId = string
export interface OpenaiCompatibleProviderSettings {
  /**
   * API key for authenticating requests.
   */
  apiKey?: string
  /**
   * Base URL for the OpenAI Compatible API calls.
   */
  baseURL?: string
  /**
   * Name of the provider.
   */
  name?: string
  /**
   * Custom headers to include in the requests.
   */
  headers?: Record<string, string>
  /**
   * Custom fetch implementation.
   */
  fetch?: FetchFunction
}
export interface OpenaiCompatibleProvider {
  // Calling the provider directly creates a chat-completions model.
  (modelId: OpenaiCompatibleModelId): LanguageModelV2
  // Factory for the /chat/completions API.
  chat(modelId: OpenaiCompatibleModelId): LanguageModelV2
  // Factory for the /responses API.
  responses(modelId: OpenaiCompatibleModelId): LanguageModelV2
  // Alias for `chat`; satisfies the generic provider interface.
  languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV2
  // embeddingModel(modelId: any): EmbeddingModelV2
  // imageModel(modelId: any): ImageModelV2
}
/**
 * Create an OpenAI Compatible provider instance.
 *
 * The returned value is callable (defaults to the chat API) and also exposes
 * `chat`, `responses`, and `languageModel` factories.
 */
export function createOpenaiCompatible(options: OpenaiCompatibleProviderSettings = {}): OpenaiCompatibleProvider {
  const baseURL = withoutTrailingSlash(options.baseURL ?? "https://api.openai.com/v1")
  if (!baseURL) {
    throw new Error("baseURL is required")
  }
  // Merge headers: defaults first so user-supplied headers win on collision.
  const headers = {
    ...(options.apiKey && { Authorization: `Bearer ${options.apiKey}` }),
    ...options.headers,
  }
  const getHeaders = () => withUserAgentSuffix(headers, `ai-sdk/openai-compatible/${VERSION}`)
  const providerName = options.name ?? "openai-compatible"
  const buildUrl = ({ path }: { path: string }) => `${baseURL}${path}`
  // Factory for the /chat/completions transport.
  const createChatModel = (modelId: OpenaiCompatibleModelId) =>
    new OpenAICompatibleChatLanguageModel(modelId, {
      provider: `${providerName}.chat`,
      headers: getHeaders,
      url: buildUrl,
      fetch: options.fetch,
    })
  // Factory for the /responses transport.
  const createResponsesModel = (modelId: OpenaiCompatibleModelId) =>
    new OpenAIResponsesLanguageModel(modelId, {
      provider: `${providerName}.responses`,
      headers: getHeaders,
      url: buildUrl,
      fetch: options.fetch,
    })
  // Calling the provider directly is equivalent to `.chat(...)`.
  const provider = (modelId: OpenaiCompatibleModelId) => createChatModel(modelId)
  provider.languageModel = (modelId: OpenaiCompatibleModelId) => createChatModel(modelId)
  provider.chat = createChatModel
  provider.responses = createResponsesModel
  return provider as OpenaiCompatibleProvider
}
// Default OpenAI Compatible provider instance
export const openaiCompatible = createOpenaiCompatible()

View File

@@ -0,0 +1,2 @@
export { createOpenaiCompatible, openaiCompatible } from "./copilot-provider"
export type { OpenaiCompatibleProvider, OpenaiCompatibleProviderSettings } from "./copilot-provider"

View File

@@ -0,0 +1,27 @@
import { z, type ZodType } from "zod/v4"
// Error payload envelope shared by OpenAI-style endpoints.
export const openaiCompatibleErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
})
export type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>
// Describes how a provider reports errors: the schema used to parse the
// error body, how to render it as a human-readable message, and an optional
// predicate deciding whether a given failure may be retried.
export type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>
  errorToMessage: (error: T) => string
  isRetryable?: (response: Response, error?: T) => boolean
}
// Default structure matching the standard OpenAI error envelope.
export const defaultOpenAICompatibleErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> = {
  errorSchema: openaiCompatibleErrorDataSchema,
  errorToMessage: (data) => data.error.message,
}

View File

@@ -0,0 +1,303 @@
import {
type LanguageModelV2CallWarning,
type LanguageModelV2Prompt,
type LanguageModelV2ToolCallPart,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { convertToBase64, parseProviderOptions } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
import type { OpenAIResponsesInput, OpenAIResponsesReasoning } from "./openai-responses-api-types"
import { localShellInputSchema, localShellOutputSchema } from "./tool/local-shell"
/**
 * Returns true when `data` looks like a provider file ID, i.e. it starts
 * with one of the configured prefixes. When `prefixes` is undefined, file
 * ID detection is disabled and the function always returns false.
 */
function isFileId(data: string, prefixes?: readonly string[]): boolean {
  return prefixes?.some((prefix) => data.startsWith(prefix)) ?? false
}
/**
 * Converts an AI SDK `LanguageModelV2Prompt` into the OpenAI Responses API
 * `input` item list.
 *
 * @param prompt - ordered system/user/assistant/tool messages.
 * @param systemMessageMode - how system messages are encoded: `system`,
 *   `developer`, or `remove` (dropped with a warning).
 * @param fileIdPrefixes - prefixes identifying provider file IDs; when
 *   undefined, file data is always sent inline (base64 data URL).
 * @param store - when true, server-stored items (provider tool results,
 *   reasoning) are sent as `item_reference`s instead of inline content.
 * @param hasLocalShellTool - enables the built-in `local_shell` wire format
 *   for matching tool calls and results.
 * @returns the Responses API `input` array plus any non-fatal warnings.
 */
export async function convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode,
  fileIdPrefixes,
  store,
  hasLocalShellTool = false,
}: {
  prompt: LanguageModelV2Prompt
  systemMessageMode: "system" | "developer" | "remove"
  fileIdPrefixes?: readonly string[]
  store: boolean
  hasLocalShellTool?: boolean
}): Promise<{
  input: OpenAIResponsesInput
  warnings: Array<LanguageModelV2CallWarning>
}> {
  const input: OpenAIResponsesInput = []
  const warnings: Array<LanguageModelV2CallWarning> = []
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            input.push({ role: "system", content })
            break
          }
          case "developer": {
            input.push({ role: "developer", content })
            break
          }
          case "remove": {
            warnings.push({
              type: "other",
              message: "system messages are removed for this model",
            })
            break
          }
          default: {
            const _exhaustiveCheck: never = systemMessageMode
            throw new Error(`Unsupported system message mode: ${_exhaustiveCheck}`)
          }
        }
        break
      }
      case "user": {
        input.push({
          role: "user",
          content: content.map((part, index) => {
            switch (part.type) {
              case "text": {
                return { type: "input_text", text: part.text }
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // normalize the wildcard media type to a concrete default
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
                  return {
                    type: "input_image",
                    ...(part.data instanceof URL
                      ? { image_url: part.data.toString() }
                      : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes)
                        ? { file_id: part.data }
                        : {
                            image_url: `data:${mediaType};base64,${convertToBase64(part.data)}`,
                          }),
                    detail: part.providerOptions?.openai?.imageDetail,
                  }
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    return {
                      type: "input_file",
                      file_url: part.data.toString(),
                    }
                  }
                  return {
                    type: "input_file",
                    ...(typeof part.data === "string" && isFileId(part.data, fileIdPrefixes)
                      ? { file_id: part.data }
                      : {
                          filename: part.filename ?? `part-${index}.pdf`,
                          file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`,
                        }),
                  }
                } else {
                  throw new UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`,
                  })
                }
              }
            }
          }),
        })
        break
      }
      case "assistant": {
        // reasoning items already emitted for this message, keyed by item id
        const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {}
        // NOTE(review): collected but not read in this function — presumably
        // kept for parity with upstream or future use; confirm before removal.
        const toolCallParts: Record<string, LanguageModelV2ToolCallPart> = {}
        for (const part of content) {
          switch (part.type) {
            case "text": {
              input.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }],
                id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
              })
              break
            }
            case "tool-call": {
              toolCallParts[part.toolCallId] = part
              if (part.providerExecuted) {
                break
              }
              if (hasLocalShellTool && part.toolName === "local_shell") {
                const parsedInput = localShellInputSchema.parse(part.input)
                input.push({
                  type: "local_shell_call",
                  call_id: part.toolCallId,
                  id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
                  action: {
                    type: "exec",
                    command: parsedInput.action.command,
                    timeout_ms: parsedInput.action.timeoutMs,
                    user: parsedInput.action.user,
                    working_directory: parsedInput.action.workingDirectory,
                    env: parsedInput.action.env,
                  },
                })
                break
              }
              input.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: JSON.stringify(part.input),
                id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
              })
              break
            }
            // assistant tool result parts are from provider-executed tools:
            case "tool-result": {
              if (store) {
                // use item references to refer to tool results from built-in tools
                input.push({ type: "item_reference", id: part.toolCallId })
              } else {
                warnings.push({
                  type: "other",
                  message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`,
                })
              }
              break
            }
            case "reasoning": {
              const providerOptions = await parseProviderOptions({
                provider: "copilot",
                providerOptions: part.providerOptions,
                schema: openaiResponsesReasoningProviderOptionsSchema,
              })
              const reasoningId = providerOptions?.itemId
              if (reasoningId != null) {
                const reasoningMessage = reasoningMessages[reasoningId]
                if (store) {
                  if (reasoningMessage === undefined) {
                    // use item references to refer to reasoning (single reference)
                    input.push({ type: "item_reference", id: reasoningId })
                    // store unused reasoning message to mark id as used
                    reasoningMessages[reasoningId] = {
                      type: "reasoning",
                      id: reasoningId,
                      summary: [],
                    }
                  }
                } else {
                  const summaryParts: Array<{
                    type: "summary_text"
                    text: string
                  }> = []
                  if (part.text.length > 0) {
                    summaryParts.push({
                      type: "summary_text",
                      text: part.text,
                    })
                  } else if (reasoningMessage !== undefined) {
                    warnings.push({
                      type: "other",
                      message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`,
                    })
                  }
                  if (reasoningMessage === undefined) {
                    reasoningMessages[reasoningId] = {
                      type: "reasoning",
                      id: reasoningId,
                      encrypted_content: providerOptions?.reasoningEncryptedContent,
                      summary: summaryParts,
                    }
                    input.push(reasoningMessages[reasoningId])
                  } else {
                    // later parts carrying the same id extend the existing summary
                    reasoningMessage.summary.push(...summaryParts)
                  }
                }
              } else {
                warnings.push({
                  type: "other",
                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`,
                })
              }
              break
            }
          }
        }
        break
      }
      case "tool": {
        for (const part of content) {
          const output = part.output
          if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
            input.push({
              type: "local_shell_call_output",
              call_id: part.toolCallId,
              output: localShellOutputSchema.parse(output.value).output,
            })
            // fix: was `break`, which exited the part loop and silently
            // dropped any remaining tool results in the same tool message
            continue
          }
          let contentValue: string
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value
              break
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value)
              break
          }
          input.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: contentValue,
          })
        }
        break
      }
      default: {
        const _exhaustiveCheck: never = role
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`)
      }
    }
  }
  return { input, warnings }
}
// Provider options attached to reasoning parts: the Responses item id and,
// when store=false, the encrypted reasoning content to replay.
const openaiResponsesReasoningProviderOptionsSchema = z.object({
  itemId: z.string().nullish(),
  reasoningEncryptedContent: z.string().nullish(),
})
export type OpenAIResponsesReasoningProviderOptions = z.infer<typeof openaiResponsesReasoningProviderOptionsSchema>

View File

@@ -0,0 +1,22 @@
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
/**
 * Maps an OpenAI Responses API finish reason onto the AI SDK's
 * `LanguageModelV2FinishReason` vocabulary.
 *
 * @param finishReason - raw reason from the API; null/undefined means a
 *   normal completion.
 * @param hasFunctionCall - whether client-side (non-provider-executed) tool
 *   calls occurred; they take precedence over "stop"/"unknown".
 */
export function mapOpenAIResponseFinishReason({
  finishReason,
  hasFunctionCall,
}: {
  finishReason: string | null | undefined
  // flag that checks if there have been client-side tool calls (not executed by openai)
  hasFunctionCall: boolean
}): LanguageModelV2FinishReason {
  if (finishReason == null) {
    // normal completion
    return hasFunctionCall ? "tool-calls" : "stop"
  }
  if (finishReason === "max_output_tokens") {
    return "length"
  }
  if (finishReason === "content_filter") {
    return "content-filter"
  }
  // unrecognized reasons still surface tool calls when present
  return hasFunctionCall ? "tool-calls" : "unknown"
}

View File

@@ -0,0 +1,18 @@
import type { FetchFunction } from "@ai-sdk/provider-utils"
// Shared configuration consumed by the OpenAI-style language models.
export type OpenAIConfig = {
  // Provider identifier, e.g. "openai-compatible.responses".
  provider: string
  // Builds the request URL for a given model and endpoint path.
  url: (options: { modelId: string; path: string }) => string
  // Produces request headers; undefined values are dropped by the HTTP layer.
  headers: () => Record<string, string | undefined>
  // Custom fetch implementation (e.g. for proxies or testing).
  fetch?: FetchFunction
  // Generator for synthetic IDs when the provider omits them.
  generateId?: () => string
  /**
   * File ID prefixes used to identify file IDs in Responses API.
   * When undefined, all file data is treated as base64 content.
   *
   * Examples:
   * - OpenAI: ['file-'] for IDs like 'file-abc123'
   * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
   */
  fileIdPrefixes?: readonly string[]
}

View File

@@ -0,0 +1,22 @@
import { z } from "zod/v4"
import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils"
// Minimal error envelope returned by OpenAI-style endpoints.
export const openaiErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
})
export type OpenAIErrorData = z.infer<typeof openaiErrorDataSchema>
// Shared failed-response handler that surfaces `error.message`.
// NOTE(review): typed `any`, presumably to paper over a zod/v4 vs
// @ai-sdk/provider-utils schema-type mismatch — confirm, and replace with
// the inferred handler type if the library versions are compatible.
export const openaiFailedResponseHandler: any = createJsonErrorResponseHandler({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message,
})

View File

@@ -0,0 +1,207 @@
import type { JSONSchema7 } from "@ai-sdk/provider"
export type OpenAIResponsesInput = Array<OpenAIResponsesInputItem>
export type OpenAIResponsesInputItem =
| OpenAIResponsesSystemMessage
| OpenAIResponsesUserMessage
| OpenAIResponsesAssistantMessage
| OpenAIResponsesFunctionCall
| OpenAIResponsesFunctionCallOutput
| OpenAIResponsesComputerCall
| OpenAIResponsesLocalShellCall
| OpenAIResponsesLocalShellCallOutput
| OpenAIResponsesReasoning
| OpenAIResponsesItemReference
export type OpenAIResponsesIncludeValue =
| "web_search_call.action.sources"
| "code_interpreter_call.outputs"
| "computer_call_output.output.image_url"
| "file_search_call.results"
| "message.input_image.image_url"
| "message.output_text.logprobs"
| "reasoning.encrypted_content"
export type OpenAIResponsesIncludeOptions = Array<OpenAIResponsesIncludeValue> | undefined | null
export type OpenAIResponsesSystemMessage = {
role: "system" | "developer"
content: string
}
export type OpenAIResponsesUserMessage = {
role: "user"
content: Array<
| { type: "input_text"; text: string }
| { type: "input_image"; image_url: string }
| { type: "input_image"; file_id: string }
| { type: "input_file"; file_url: string }
| { type: "input_file"; filename: string; file_data: string }
| { type: "input_file"; file_id: string }
>
}
export type OpenAIResponsesAssistantMessage = {
role: "assistant"
content: Array<{ type: "output_text"; text: string }>
id?: string
}
export type OpenAIResponsesFunctionCall = {
type: "function_call"
call_id: string
name: string
arguments: string
id?: string
}
export type OpenAIResponsesFunctionCallOutput = {
type: "function_call_output"
call_id: string
output: string
}
export type OpenAIResponsesComputerCall = {
type: "computer_call"
id: string
status?: string
}
export type OpenAIResponsesLocalShellCall = {
type: "local_shell_call"
id: string
call_id: string
action: {
type: "exec"
command: string[]
timeout_ms?: number
user?: string
working_directory?: string
env?: Record<string, string>
}
}
export type OpenAIResponsesLocalShellCallOutput = {
type: "local_shell_call_output"
call_id: string
output: string
}
export type OpenAIResponsesItemReference = {
type: "item_reference"
id: string
}
/**
* A filter used to compare a specified attribute key to a given value using a defined comparison operation.
*/
export type OpenAIResponsesFileSearchToolComparisonFilter = {
/**
* The key to compare against the value.
*/
key: string
/**
* Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
*/
type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte"
/**
* The value to compare against the attribute key; supports string, number, or boolean types.
*/
value: string | number | boolean
}
/**
* Combine multiple filters using and or or.
*/
export type OpenAIResponsesFileSearchToolCompoundFilter = {
/**
* Type of operation: and or or.
*/
type: "and" | "or"
/**
* Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
*/
filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>
}
/**
 * Union of tool definitions sent in the Responses API `tools` array.
 * Fields typed `T | undefined` (rather than optional) are always present on
 * the object, so the mapper sets them explicitly even when undefined.
 */
export type OpenAIResponsesTool =
  // Caller-defined function tool whose arguments are described by JSON Schema.
  | {
      type: "function"
      name: string
      description: string | undefined
      parameters: JSONSchema7
      strict: boolean | undefined
    }
  // Hosted web search with optional domain filtering.
  | {
      type: "web_search"
      filters: { allowed_domains: string[] | undefined } | undefined
      search_context_size: "low" | "medium" | "high" | undefined
      user_location:
        | {
            type: "approximate"
            city?: string
            country?: string
            region?: string
            timezone?: string
          }
        | undefined
    }
  // Preview variant of the hosted web search tool (no domain filters).
  | {
      type: "web_search_preview"
      search_context_size: "low" | "medium" | "high" | undefined
      user_location:
        | {
            type: "approximate"
            city?: string
            country?: string
            region?: string
            timezone?: string
          }
        | undefined
    }
  // Hosted code interpreter; container is an explicit id or auto-provisioned.
  | {
      type: "code_interpreter"
      container: string | { type: "auto"; file_ids: string[] | undefined }
    }
  // Hosted file search over the given vector stores.
  | {
      type: "file_search"
      vector_store_ids: string[]
      max_num_results: number | undefined
      ranking_options: { ranker?: string; score_threshold?: number } | undefined
      filters: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter | undefined
    }
  // Hosted image generation with optional rendering options.
  | {
      type: "image_generation"
      background: "auto" | "opaque" | "transparent" | undefined
      input_fidelity: "low" | "high" | undefined
      input_image_mask:
        | {
            file_id: string | undefined
            image_url: string | undefined
          }
        | undefined
      model: string | undefined
      moderation: "auto" | undefined
      output_compression: number | undefined
      output_format: "png" | "jpeg" | "webp" | undefined
      partial_images: number | undefined
      quality: "auto" | "low" | "medium" | "high" | undefined
      size: "auto" | "1024x1024" | "1024x1536" | "1536x1024" | undefined
    }
  // Local shell tool; takes no configuration.
  | {
      type: "local_shell"
    }
/** A reasoning item; summary parts carry the model's reasoning summary text. */
export type OpenAIResponsesReasoning = {
  type: "reasoning"
  id: string
  // Present when reasoning content is returned in encrypted form; may be null.
  encrypted_content?: string | null
  summary: Array<{
    type: "summary_text"
    text: string
  }>
}

View File

@@ -0,0 +1,177 @@
import {
type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { codeInterpreterArgsSchema } from "./tool/code-interpreter"
import { fileSearchArgsSchema } from "./tool/file-search"
import { webSearchArgsSchema } from "./tool/web-search"
import { webSearchPreviewArgsSchema } from "./tool/web-search-preview"
import { imageGenerationArgsSchema } from "./tool/image-generation"
import type { OpenAIResponsesTool } from "./openai-responses-api-types"
/**
 * Maps AI SDK tool definitions and tool choice onto the OpenAI Responses API
 * wire format.
 *
 * @param tools - Tools from the call options; an empty array is treated the
 *   same as no tools at all.
 * @param toolChoice - Optional tool choice from the call options.
 * @param strictJsonSchema - Whether function parameter schemas are sent with
 *   strict mode enabled.
 * @returns The mapped tools and tool choice, plus warnings for any tool that
 *   could not be translated.
 * @throws UnsupportedFunctionalityError when the tool choice type is unknown.
 */
export function prepareResponsesTools({
  tools,
  toolChoice,
  strictJsonSchema,
}: {
  tools: LanguageModelV2CallOptions["tools"]
  toolChoice?: LanguageModelV2CallOptions["toolChoice"]
  strictJsonSchema: boolean
}): {
  tools?: Array<OpenAIResponsesTool>
  toolChoice?:
    | "auto"
    | "none"
    | "required"
    | { type: "file_search" }
    | { type: "web_search_preview" }
    | { type: "web_search" }
    | { type: "function"; name: string }
    | { type: "code_interpreter" }
    | { type: "image_generation" }
  toolWarnings: LanguageModelV2CallWarning[]
} {
  // when the tools array is empty, change it to undefined to prevent errors:
  tools = tools?.length ? tools : undefined
  const toolWarnings: LanguageModelV2CallWarning[] = []
  if (tools == null) {
    return { tools: undefined, toolChoice: undefined, toolWarnings }
  }
  const openaiTools: Array<OpenAIResponsesTool> = []
  for (const tool of tools) {
    switch (tool.type) {
      case "function":
        openaiTools.push({
          type: "function",
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          strict: strictJsonSchema,
        })
        break
      case "provider-defined": {
        switch (tool.id) {
          case "openai.file_search": {
            // Validate args and translate camelCase options to snake_case wire fields.
            const args = fileSearchArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "file_search",
              vector_store_ids: args.vectorStoreIds,
              max_num_results: args.maxNumResults,
              ranking_options: args.ranking
                ? {
                    ranker: args.ranking.ranker,
                    score_threshold: args.ranking.scoreThreshold,
                  }
                : undefined,
              filters: args.filters,
            })
            break
          }
          case "openai.local_shell": {
            openaiTools.push({
              type: "local_shell",
            })
            break
          }
          case "openai.web_search_preview": {
            const args = webSearchPreviewArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "web_search_preview",
              search_context_size: args.searchContextSize,
              user_location: args.userLocation,
            })
            break
          }
          case "openai.web_search": {
            const args = webSearchArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "web_search",
              filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : undefined,
              search_context_size: args.searchContextSize,
              user_location: args.userLocation,
            })
            break
          }
          case "openai.code_interpreter": {
            const args = codeInterpreterArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "code_interpreter",
              // No container config means "auto" provisioning with no files attached.
              container:
                args.container == null
                  ? { type: "auto", file_ids: undefined }
                  : typeof args.container === "string"
                    ? args.container
                    : { type: "auto", file_ids: args.container.fileIds },
            })
            break
          }
          case "openai.image_generation": {
            const args = imageGenerationArgsSchema.parse(tool.args)
            openaiTools.push({
              type: "image_generation",
              background: args.background,
              input_fidelity: args.inputFidelity,
              input_image_mask: args.inputImageMask
                ? {
                    file_id: args.inputImageMask.fileId,
                    image_url: args.inputImageMask.imageUrl,
                  }
                : undefined,
              model: args.model,
              moderation: args.moderation,
              partial_images: args.partialImages,
              quality: args.quality,
              output_compression: args.outputCompression,
              output_format: args.outputFormat,
              size: args.size,
            })
            break
          }
          // FIX: an unrecognized provider-defined tool id was previously
          // dropped silently; report it like any other unsupported tool so
          // callers can see why the tool never reached the provider.
          default:
            toolWarnings.push({ type: "unsupported-tool", tool })
            break
        }
        break
      }
      default:
        toolWarnings.push({ type: "unsupported-tool", tool })
        break
    }
  }
  if (toolChoice == null) {
    return { tools: openaiTools, toolChoice: undefined, toolWarnings }
  }
  const type = toolChoice.type
  switch (type) {
    case "auto":
    case "none":
    case "required":
      return { tools: openaiTools, toolChoice: type, toolWarnings }
    case "tool":
      return {
        tools: openaiTools,
        // Built-in tools are referenced by their bare type; anything else is
        // treated as a function name.
        toolChoice:
          toolChoice.toolName === "code_interpreter" ||
          toolChoice.toolName === "file_search" ||
          toolChoice.toolName === "image_generation" ||
          toolChoice.toolName === "web_search_preview" ||
          toolChoice.toolName === "web_search"
            ? { type: toolChoice.toolName }
            : { type: "function", name: toolChoice.toolName },
        toolWarnings,
      }
    default: {
      // Exhaustiveness guard: fails to compile if a new choice type appears.
      const _exhaustiveCheck: never = type
      throw new UnsupportedFunctionalityError({
        functionality: `tool choice type: ${_exhaustiveCheck}`,
      })
    }
  }
}

View File

@@ -0,0 +1 @@
/** Model identifier for the Responses API; left as a free-form string so newly released model ids are accepted without a type update. */
export type OpenAIResponsesModelId = string

View File

@@ -0,0 +1,88 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
/** Runtime validation for the input of a code interpreter tool call. */
export const codeInterpreterInputSchema = z.object({
  containerId: z.string(),
  code: z.string().nullish(),
})
// Individual output item shapes, discriminated on `type`.
const codeInterpreterLogsOutput = z.object({ type: z.literal("logs"), logs: z.string() })
const codeInterpreterImageOutput = z.object({ type: z.literal("image"), url: z.string() })
/** Runtime validation for the output of a code interpreter tool call. */
export const codeInterpreterOutputSchema = z.object({
  outputs: z.array(z.discriminatedUnion("type", [codeInterpreterLogsOutput, codeInterpreterImageOutput])).nullish(),
})
/** Runtime validation for user-supplied code interpreter configuration. */
export const codeInterpreterArgsSchema = z.object({
  // Either a container id, or an object naming uploaded files to attach.
  container: z
    .string()
    .or(z.object({ fileIds: z.array(z.string()).optional() }))
    .optional(),
})
/** User-facing configuration accepted by the `codeInterpreter` tool factory. */
type CodeInterpreterArgs = {
  /**
   * The code interpreter container.
   * Can be a container ID
   * or an object that specifies uploaded file IDs to make available to your code.
   */
  container?: string | { fileIds?: string[] }
}
/**
 * Tool factory bound to the provider-defined id "openai.code_interpreter".
 * The three generic parameters describe the tool's model-side input, its
 * output, and the user-facing configuration args.
 */
export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
  {
    /**
     * The code to run, or null if not available.
     */
    code?: string | null
    /**
     * The ID of the container used to run the code.
     */
    containerId: string
  },
  {
    /**
     * The outputs generated by the code interpreter, such as logs or images.
     * Can be null if no outputs are available.
     */
    outputs?: Array<
      | {
          type: "logs"
          /**
           * The logs output from the code interpreter.
           */
          logs: string
        }
      | {
          type: "image"
          /**
           * The URL of the image output from the code interpreter.
           */
          url: string
        }
    > | null
  },
  CodeInterpreterArgs
>({
  id: "openai.code_interpreter",
  name: "code_interpreter",
  inputSchema: codeInterpreterInputSchema,
  outputSchema: codeInterpreterOutputSchema,
})
/**
 * Creates a code interpreter tool instance; `args` defaults to an empty
 * configuration so the tool can be used without any setup.
 */
export const codeInterpreter = (args: CodeInterpreterArgs = {}) => codeInterpreterToolFactory(args)

View File

@@ -0,0 +1,128 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import type {
OpenAIResponsesFileSearchToolComparisonFilter,
OpenAIResponsesFileSearchToolCompoundFilter,
} from "../openai-responses-api-types"
import { z } from "zod/v4"
// Runtime counterpart of OpenAIResponsesFileSearchToolComparisonFilter.
const comparisonFilterSchema = z.object({
  key: z.string(),
  type: z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
  value: z.union([z.string(), z.number(), z.boolean()]),
})
/**
 * Statically-known shape of a compound filter: "and"/"or" over nested
 * comparison or compound filters. Declared explicitly because the schema is
 * recursive (via z.lazy), which prevents zod from inferring the type.
 */
type CompoundFilter = {
  type: "and" | "or"
  filters: Array<z.infer<typeof comparisonFilterSchema> | CompoundFilter>
}
// FIX: was annotated z.ZodType<any>, which leaked `any` into parse results;
// the recursive type above keeps the schema fully typed.
const compoundFilterSchema: z.ZodType<CompoundFilter> = z.object({
  type: z.enum(["and", "or"]),
  filters: z.array(z.union([comparisonFilterSchema, z.lazy(() => compoundFilterSchema)])),
})
/** Runtime validation for user-supplied file search configuration. */
export const fileSearchArgsSchema = z.object({
  vectorStoreIds: z.array(z.string()),
  maxNumResults: z.number().optional(),
  ranking: z
    .object({ ranker: z.string().optional(), scoreThreshold: z.number().optional() })
    .optional(),
  // A filter may be a single comparison or a nested compound expression.
  filters: comparisonFilterSchema.or(compoundFilterSchema).optional(),
})
// Shape of a single retrieved file search result.
const fileSearchResultSchema = z.object({
  attributes: z.record(z.string(), z.unknown()),
  fileId: z.string(),
  filename: z.string(),
  score: z.number(),
  text: z.string(),
})
/** Runtime validation for the output of a file search tool call. */
export const fileSearchOutputSchema = z.object({
  queries: z.array(z.string()),
  results: z.array(fileSearchResultSchema).nullable(),
})
/**
 * Tool factory bound to the provider-defined id "openai.file_search".
 * Takes no model-side input; the output carries the executed queries and the
 * (possibly null) retrieved results.
 */
export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The search query to execute.
     */
    queries: string[]
    /**
     * The results of the file search tool call.
     */
    results:
      | null
      | {
          /**
           * Set of 16 key-value pairs that can be attached to an object.
           * This can be useful for storing additional information about the object
           * in a structured format, and querying for objects via API or the dashboard.
           * Keys are strings with a maximum length of 64 characters.
           * Values are strings with a maximum length of 512 characters, booleans, or numbers.
           */
          attributes: Record<string, unknown>
          /**
           * The unique ID of the file.
           */
          fileId: string
          /**
           * The name of the file.
           */
          filename: string
          /**
           * The relevance score of the file - a value between 0 and 1.
           */
          score: number
          /**
           * The text that was retrieved from the file.
           */
          text: string
        }[]
  },
  {
    /**
     * List of vector store IDs to search through.
     */
    vectorStoreIds: string[]
    /**
     * Maximum number of search results to return. Defaults to 10.
     */
    maxNumResults?: number
    /**
     * Ranking options for the search.
     */
    ranking?: {
      /**
       * The ranker to use for the file search.
       */
      ranker?: string
      /**
       * The score threshold for the file search, a number between 0 and 1.
       * Numbers closer to 1 will attempt to return only the most relevant results,
       * but may return fewer results.
       */
      scoreThreshold?: number
    }
    /**
     * A filter to apply.
     */
    filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter
  }
>({
  id: "openai.file_search",
  name: "file_search",
  inputSchema: z.object({}),
  outputSchema: fileSearchOutputSchema,
})

View File

@@ -0,0 +1,115 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Runtime validation for user-supplied image generation configuration.
// .strict() rejects unknown keys so misspelled option names fail loudly.
export const imageGenerationArgsSchema = z
  .object({
    background: z.enum(["auto", "opaque", "transparent"]).optional(),
    inputFidelity: z.enum(["low", "high"]).optional(),
    inputImageMask: z
      .object({
        fileId: z.string().optional(),
        imageUrl: z.string().optional(),
      })
      .optional(),
    model: z.string().optional(),
    moderation: z.enum(["auto"]).optional(),
    outputCompression: z.number().int().min(0).max(100).optional(),
    outputFormat: z.enum(["png", "jpeg", "webp"]).optional(),
    partialImages: z.number().int().min(0).max(3).optional(),
    quality: z.enum(["auto", "low", "medium", "high"]).optional(),
    size: z.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional(),
  })
  .strict()
/** Runtime validation for the image generation output (base64-encoded image). */
export const imageGenerationOutputSchema = z.object({ result: z.string() })
/** User-facing configuration accepted by the `imageGeneration` tool factory. */
type ImageGenerationArgs = {
  /**
   * Background type for the generated image. Default is 'auto'.
   */
  background?: "auto" | "opaque" | "transparent"
  /**
   * Input fidelity for the generated image. Default is 'low'.
   */
  inputFidelity?: "low" | "high"
  /**
   * Optional mask for inpainting.
   * Contains image_url (string, optional) and file_id (string, optional).
   */
  inputImageMask?: {
    /**
     * File ID for the mask image.
     */
    fileId?: string
    /**
     * Base64-encoded mask image.
     */
    imageUrl?: string
  }
  /**
   * The image generation model to use. Default: gpt-image-1.
   */
  model?: string
  /**
   * Moderation level for the generated image. Default: auto.
   */
  moderation?: "auto"
  /**
   * Compression level for the output image. Default: 100.
   */
  outputCompression?: number
  /**
   * The output format of the generated image. One of png, webp, or jpeg.
   * Default: png
   */
  outputFormat?: "png" | "jpeg" | "webp"
  /**
   * Number of partial images to generate in streaming mode, from 0 (default value) to 3.
   */
  partialImages?: number
  /**
   * The quality of the generated image.
   * One of low, medium, high, or auto. Default: auto.
   */
  quality?: "auto" | "low" | "medium" | "high"
  /**
   * The size of the generated image.
   * One of 1024x1024, 1024x1536, 1536x1024, or auto.
   * Default: auto.
   */
  size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024"
}
/**
 * Tool factory bound to the provider-defined id "openai.image_generation".
 * Takes no model-side input; the result is the generated image as base64.
 */
const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
  {},
  {
    /**
     * The generated image encoded in base64.
     */
    result: string
  },
  ImageGenerationArgs
>({
  id: "openai.image_generation",
  name: "image_generation",
  inputSchema: z.object({}),
  outputSchema: imageGenerationOutputSchema,
})
/**
 * Creates an image generation tool instance; `args` defaults to an empty
 * configuration so all options take their provider defaults.
 */
export const imageGeneration = (args: ImageGenerationArgs = {}) => imageGenerationToolFactory(args)

View File

@@ -0,0 +1,65 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Shape of the exec action requested by the model.
const localShellExecActionSchema = z.object({
  type: z.literal("exec"),
  command: z.array(z.string()),
  timeoutMs: z.number().optional(),
  user: z.string().optional(),
  workingDirectory: z.string().optional(),
  env: z.record(z.string(), z.string()).optional(),
})
/** Runtime validation for the input of a local shell tool call. */
export const localShellInputSchema = z.object({ action: localShellExecActionSchema })
/** Runtime validation for the output of a local shell tool call. */
export const localShellOutputSchema = z.object({ output: z.string() })
/**
 * Tool factory bound to the provider-defined id "openai.local_shell".
 * The model supplies an exec action; the tool result is the command output.
 */
export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
  {
    /**
     * Execute a shell command on the server.
     */
    action: {
      type: "exec"
      /**
       * The command to run.
       */
      command: string[]
      /**
       * Optional timeout in milliseconds for the command.
       */
      timeoutMs?: number
      /**
       * Optional user to run the command as.
       */
      user?: string
      /**
       * Optional working directory to run the command in.
       */
      workingDirectory?: string
      /**
       * Environment variables to set for the command.
       */
      env?: Record<string, string>
    }
  },
  {
    /**
     * The output of local shell tool call.
     */
    output: string
  },
  {}
>({
  id: "openai.local_shell",
  name: "local_shell",
  inputSchema: localShellInputSchema,
  outputSchema: localShellOutputSchema,
})

View File

@@ -0,0 +1,104 @@
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Runtime validation for user-supplied web search preview configuration.
export const webSearchPreviewArgsSchema = z.object({
  /**
   * Search context size to use for the web search.
   * - high: Most comprehensive context, highest cost, slower response
   * - medium: Balanced context, cost, and latency (default)
   * - low: Least context, lowest cost, fastest response
   */
  searchContextSize: z.enum(["low", "medium", "high"]).optional(),
  /**
   * User location information to provide geographically relevant search results.
   */
  userLocation: z
    .object({
      /**
       * Type of location (always 'approximate')
       */
      type: z.literal("approximate"),
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country: z.string().optional(),
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city: z.string().optional(),
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region: z.string().optional(),
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone: z.string().optional(),
    })
    .optional(),
})
/**
 * Tool factory bound to the provider-defined id "openai.web_search_preview".
 * Its inputSchema accepts an optional action of kind search, open_page, or
 * find; the generic parameters describe the user-facing configuration.
 */
export const webSearchPreview = createProviderDefinedToolFactory<
  {
    // Web search doesn't take input parameters - it's controlled by the prompt
  },
  {
    /**
     * Search context size to use for the web search.
     * - high: Most comprehensive context, highest cost, slower response
     * - medium: Balanced context, cost, and latency (default)
     * - low: Least context, lowest cost, fastest response
     */
    searchContextSize?: "low" | "medium" | "high"
    /**
     * User location information to provide geographically relevant search results.
     */
    userLocation?: {
      /**
       * Type of location (always 'approximate')
       */
      type: "approximate"
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country?: string
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city?: string
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region?: string
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone?: string
    }
  }
>({
  id: "openai.web_search_preview",
  name: "web_search_preview",
  inputSchema: z.object({
    action: z
      .discriminatedUnion("type", [
        z.object({
          type: z.literal("search"),
          query: z.string().nullish(),
        }),
        z.object({
          type: z.literal("open_page"),
          url: z.string(),
        }),
        z.object({
          type: z.literal("find"),
          url: z.string(),
          pattern: z.string(),
        }),
      ])
      .nullish(),
  }),
})

View File

@@ -0,0 +1,103 @@
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"
// Approximate user location accepted by the web search tool.
const webSearchUserLocationSchema = z.object({
  type: z.literal("approximate"),
  country: z.string().optional(),
  city: z.string().optional(),
  region: z.string().optional(),
  timezone: z.string().optional(),
})
/** Runtime validation for user-supplied web search configuration. */
export const webSearchArgsSchema = z.object({
  filters: z.object({ allowedDomains: z.array(z.string()).optional() }).optional(),
  searchContextSize: z.enum(["low", "medium", "high"]).optional(),
  userLocation: webSearchUserLocationSchema.optional(),
})
/**
 * Tool factory bound to the provider-defined id "openai.web_search".
 * Its inputSchema accepts an optional action of kind search, open_page, or
 * find; the generic parameters describe the user-facing configuration.
 */
export const webSearchToolFactory = createProviderDefinedToolFactory<
  {
    // Web search doesn't take input parameters - it's controlled by the prompt
  },
  {
    /**
     * Filters for the search.
     */
    filters?: {
      /**
       * Allowed domains for the search.
       * If not provided, all domains are allowed.
       * Subdomains of the provided domains are allowed as well.
       */
      allowedDomains?: string[]
    }
    /**
     * Search context size to use for the web search.
     * - high: Most comprehensive context, highest cost, slower response
     * - medium: Balanced context, cost, and latency (default)
     * - low: Least context, lowest cost, fastest response
     */
    searchContextSize?: "low" | "medium" | "high"
    /**
     * User location information to provide geographically relevant search results.
     */
    userLocation?: {
      /**
       * Type of location (always 'approximate')
       */
      type: "approximate"
      /**
       * Two-letter ISO country code (e.g., 'US', 'GB')
       */
      country?: string
      /**
       * City name (free text, e.g., 'Minneapolis')
       */
      city?: string
      /**
       * Region name (free text, e.g., 'Minnesota')
       */
      region?: string
      /**
       * IANA timezone (e.g., 'America/Chicago')
       */
      timezone?: string
    }
  }
>({
  id: "openai.web_search",
  name: "web_search",
  inputSchema: z.object({
    action: z
      .discriminatedUnion("type", [
        z.object({
          type: z.literal("search"),
          query: z.string().nullish(),
        }),
        z.object({
          type: z.literal("open_page"),
          url: z.string(),
        }),
        z.object({
          type: z.literal("find"),
          url: z.string(),
          pattern: z.string(),
        }),
      ])
      .nullish(),
  }),
})
/**
 * Creates a web search tool instance; `args` defaults to an empty
 * configuration so all options take their provider defaults.
 */
export const webSearch = (args: Parameters<typeof webSearchToolFactory>[0] = {}) => webSearchToolFactory(args)

File diff suppressed because it is too large Load Diff