feat(opencode): add copilot specific provider to properly handle copilot reasoning tokens (#8900)

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
Co-authored-by: Aiden Cline <aidenpcline@gmail.com>
This commit is contained in:
Steffen Deusch
2026-01-31 02:53:22 +01:00
committed by GitHub
parent 2f4374c829
commit d9f18e4006
33 changed files with 2381 additions and 17 deletions

View File

@@ -24,7 +24,7 @@ import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
import { createOpenAI } from "@ai-sdk/openai"
import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider"
import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/openai-compatible/src"
import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/copilot"
import { createXai } from "@ai-sdk/xai"
import { createMistral } from "@ai-sdk/mistral"
import { createGroq } from "@ai-sdk/groq"

View File

@@ -0,0 +1,177 @@
import {
type LanguageModelV2Prompt,
type SharedV2ProviderMetadata,
UnsupportedFunctionalityError,
} from '@ai-sdk/provider';
import type { OpenAICompatibleChatPrompt } from './openai-compatible-api-types';
import { convertToBase64 } from '@ai-sdk/provider-utils';
/**
 * Reads the Copilot-scoped provider options attached to a message or
 * content part, falling back to an empty object when none are present.
 */
function getOpenAIMetadata(message: {
  providerOptions?: SharedV2ProviderMetadata;
}) {
  const providerOptions = message?.providerOptions;
  return providerOptions?.copilot ?? {};
}
/**
 * Converts an AI SDK `LanguageModelV2Prompt` into the Copilot
 * OpenAI-compatible chat message array.
 *
 * Copilot-specific behavior:
 * - system messages are sent as an array of text parts (not a plain string);
 * - assistant reasoning is flattened into `reasoning_text` /
 *   `reasoning_opaque` fields on the assistant message.
 *
 * @param prompt - The standardized prompt to convert.
 * @returns The provider-wire chat messages.
 * @throws UnsupportedFunctionalityError for non-image file parts.
 */
export function convertToOpenAICompatibleChatMessages(
  prompt: LanguageModelV2Prompt,
): OpenAICompatibleChatPrompt {
  const messages: OpenAICompatibleChatPrompt = [];
  for (const { role, content, ...message } of prompt) {
    // message-level copilot provider options are spread onto the wire message
    const metadata = getOpenAIMetadata({ ...message });
    switch (role) {
      case 'system': {
        // Copilot expects system content as an array of text parts.
        messages.push({
          role: 'system',
          content: [
            {
              type: 'text',
              text: content,
            },
          ],
          ...metadata,
        });
        break;
      }
      case 'user': {
        // single text part collapses to a plain string for compactness
        if (content.length === 1 && content[0].type === 'text') {
          messages.push({
            role: 'user',
            content: content[0].text,
            ...getOpenAIMetadata(content[0]),
          });
          break;
        }
        messages.push({
          role: 'user',
          content: content.map(part => {
            const partMetadata = getOpenAIMetadata(part);
            switch (part.type) {
              case 'text': {
                return { type: 'text', text: part.text, ...partMetadata };
              }
              case 'file': {
                if (part.mediaType.startsWith('image/')) {
                  // 'image/*' is a wildcard; default to JPEG for the data URL
                  const mediaType =
                    part.mediaType === 'image/*'
                      ? 'image/jpeg'
                      : part.mediaType;
                  return {
                    type: 'image_url',
                    image_url: {
                      url:
                        part.data instanceof URL
                          ? part.data.toString()
                          : `data:${mediaType};base64,${convertToBase64(part.data)}`,
                    },
                    ...partMetadata,
                  };
                } else {
                  // only image files are supported by this provider
                  throw new UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`,
                  });
                }
              }
            }
          }),
          ...metadata,
        });
        break;
      }
      case 'assistant': {
        // Text, reasoning and tool calls are collected across all parts and
        // emitted as a single assistant message.
        let text = '';
        let reasoningText: string | undefined;
        let reasoningOpaque: string | undefined;
        const toolCalls: Array<{
          id: string;
          type: 'function';
          function: { name: string; arguments: string };
        }> = [];
        for (const part of content) {
          const partMetadata = getOpenAIMetadata(part);
          // Check for reasoningOpaque on any part (may be attached to text/tool-call)
          const partOpaque = (
            part.providerOptions as { copilot?: { reasoningOpaque?: string } }
          )?.copilot?.reasoningOpaque;
          // first opaque value wins; later ones are ignored
          if (partOpaque && !reasoningOpaque) {
            reasoningOpaque = partOpaque;
          }
          switch (part.type) {
            case 'text': {
              text += part.text;
              break;
            }
            case 'reasoning': {
              // last reasoning part wins if there are several
              reasoningText = part.text;
              break;
            }
            case 'tool-call': {
              toolCalls.push({
                id: part.toolCallId,
                type: 'function',
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input),
                },
                ...partMetadata,
              });
              break;
            }
          }
        }
        messages.push({
          role: 'assistant',
          content: text || null,
          tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
          reasoning_text: reasoningText,
          reasoning_opaque: reasoningOpaque,
          ...metadata,
        });
        break;
      }
      case 'tool': {
        // each tool response becomes its own 'tool' role message
        for (const toolResponse of content) {
          const output = toolResponse.output;
          let contentValue: string;
          switch (output.type) {
            case 'text':
            case 'error-text':
              contentValue = output.value;
              break;
            case 'content':
            case 'json':
            case 'error-json':
              contentValue = JSON.stringify(output.value);
              break;
          }
          const toolResponseMetadata = getOpenAIMetadata(toolResponse);
          messages.push({
            role: 'tool',
            tool_call_id: toolResponse.toolCallId,
            content: contentValue,
            ...toolResponseMetadata,
          });
        }
        break;
      }
      default: {
        // exhaustiveness guard: fails to compile if a new role is added
        const _exhaustiveCheck: never = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return messages;
}

View File

@@ -0,0 +1,15 @@
/**
 * Maps raw OpenAI-compatible response fields onto the AI SDK response
 * metadata shape (`id`, `modelId`, `timestamp`).
 * `created` is a unix timestamp in seconds; it is converted to a `Date`.
 */
export function getResponseMetadata({
  id,
  model,
  created,
}: {
  id?: string | undefined | null;
  created?: number | undefined | null;
  model?: string | undefined | null;
}) {
  const timestamp = created == null ? undefined : new Date(created * 1000);
  return {
    id: id ?? undefined,
    modelId: model ?? undefined,
    timestamp,
  };
}

View File

@@ -0,0 +1,19 @@
import type { LanguageModelV2FinishReason } from '@ai-sdk/provider';
/**
 * Translates the provider's `finish_reason` string into the AI SDK's
 * standardized finish reason. Unrecognized or missing values map to
 * 'unknown'.
 */
export function mapOpenAICompatibleFinishReason(
  finishReason: string | null | undefined,
): LanguageModelV2FinishReason {
  if (finishReason === 'stop' || finishReason === 'length') {
    return finishReason;
  }
  if (finishReason === 'content_filter') {
    return 'content-filter';
  }
  if (finishReason === 'function_call' || finishReason === 'tool_calls') {
    return 'tool-calls';
  }
  return 'unknown';
}

View File

@@ -0,0 +1,74 @@
import type { JSONValue } from '@ai-sdk/provider';
/** A chat prompt as sent on the wire: an ordered list of role-tagged messages. */
export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>;
/** Union of all wire message shapes, discriminated by `role`. */
export type OpenAICompatibleMessage =
  | OpenAICompatibleSystemMessage
  | OpenAICompatibleUserMessage
  | OpenAICompatibleAssistantMessage
  | OpenAICompatibleToolMessage;
// Allow for arbitrary additional properties for general purpose
// provider-metadata-specific extensibility.
type JsonRecord<T = never> = Record<
  string,
  JSONValue | JSONValue[] | T | T[] | undefined
>;
/** System message; Copilot accepts either a plain string or text parts. */
export interface OpenAICompatibleSystemMessage
  extends JsonRecord<OpenAICompatibleSystemContentPart> {
  role: 'system';
  content: string | Array<OpenAICompatibleSystemContentPart>;
}
/** A single text part inside a system message's content array. */
export interface OpenAICompatibleSystemContentPart
  extends JsonRecord {
  type: 'text';
  text: string;
}
/** User message; content may be a string or a list of text/image parts. */
export interface OpenAICompatibleUserMessage
  extends JsonRecord<OpenAICompatibleContentPart> {
  role: 'user';
  content: string | Array<OpenAICompatibleContentPart>;
}
/** Union of user content part shapes, discriminated by `type`. */
export type OpenAICompatibleContentPart =
  | OpenAICompatibleContentPartText
  | OpenAICompatibleContentPartImage;
/** Image part; `url` is either an http(s) URL or a base64 data URL. */
export interface OpenAICompatibleContentPartImage extends JsonRecord {
  type: 'image_url';
  image_url: { url: string };
}
/** Plain text part of a user message. */
export interface OpenAICompatibleContentPartText extends JsonRecord {
  type: 'text';
  text: string;
}
/** Assistant message, optionally carrying tool calls and Copilot reasoning. */
export interface OpenAICompatibleAssistantMessage
  extends JsonRecord<OpenAICompatibleMessageToolCall> {
  role: 'assistant';
  content?: string | null;
  tool_calls?: Array<OpenAICompatibleMessageToolCall>;
  // Copilot-specific reasoning fields
  reasoning_text?: string;
  reasoning_opaque?: string;
}
/** A tool call made by the assistant; `arguments` is a JSON-encoded string. */
export interface OpenAICompatibleMessageToolCall extends JsonRecord {
  type: 'function';
  id: string;
  function: {
    arguments: string;
    name: string;
  };
}
/** Result of a tool invocation, linked back via `tool_call_id`. */
export interface OpenAICompatibleToolMessage
  extends JsonRecord {
  role: 'tool';
  content: string;
  tool_call_id: string;
}

View File

@@ -0,0 +1,832 @@
import {
APICallError,
InvalidResponseDataError,
type LanguageModelV2,
type LanguageModelV2CallWarning,
type LanguageModelV2Content,
type LanguageModelV2FinishReason,
type LanguageModelV2StreamPart,
type SharedV2ProviderMetadata,
} from '@ai-sdk/provider';
import {
combineHeaders,
createEventSourceResponseHandler,
createJsonErrorResponseHandler,
createJsonResponseHandler,
type FetchFunction,
generateId,
isParsableJson,
parseProviderOptions,
type ParseResult,
postJsonToApi,
type ResponseHandler,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
import { convertToOpenAICompatibleChatMessages } from './convert-to-openai-compatible-chat-messages';
import { getResponseMetadata } from './get-response-metadata';
import { mapOpenAICompatibleFinishReason } from './map-openai-compatible-finish-reason';
import {
type OpenAICompatibleChatModelId,
openaiCompatibleProviderOptions,
} from './openai-compatible-chat-options';
import {
defaultOpenAICompatibleErrorStructure,
type ProviderErrorStructure,
} from '../openai-compatible-error';
import type { MetadataExtractor } from './openai-compatible-metadata-extractor';
import { prepareTools } from './openai-compatible-prepare-tools';
/** Configuration injected by the provider factory for a chat model instance. */
export type OpenAICompatibleChatConfig = {
  /** Provider identifier, e.g. "copilot.chat"; the part before the first '.' names the provider-options key. */
  provider: string;
  /** Produces request headers (may include auth); undefined values are dropped. */
  headers: () => Record<string, string | undefined>;
  /** Builds the full endpoint URL for a given model id and API path. */
  url: (options: { modelId: string; path: string }) => string;
  /** Optional custom fetch implementation (e.g. for testing or proxying). */
  fetch?: FetchFunction;
  /** When true, requests `stream_options.include_usage` on streaming calls. */
  includeUsage?: boolean;
  /** Error shape/handling overrides; defaults to the OpenAI-compatible structure. */
  errorStructure?: ProviderErrorStructure<any>;
  /** Optional hook to extract provider-specific metadata from responses. */
  metadataExtractor?: MetadataExtractor;
  /**
   * Whether the model supports structured outputs.
   */
  supportsStructuredOutputs?: boolean;
  /**
   * The supported URLs for the model.
   */
  supportedUrls?: () => LanguageModelV2['supportedUrls'];
};
/**
 * OpenAI-compatible chat language model (LanguageModelV2 implementation)
 * with GitHub Copilot specific extensions: it understands the
 * `reasoning_text` / `reasoning_opaque` fields Copilot returns and
 * forwards them as AI SDK reasoning content / provider metadata.
 */
export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = 'v2';
  readonly supportsStructuredOutputs: boolean;
  readonly modelId: OpenAICompatibleChatModelId;
  private readonly config: OpenAICompatibleChatConfig;
  private readonly failedResponseHandler: ResponseHandler<APICallError>;
  private readonly chunkSchema; // type inferred via constructor
  constructor(
    modelId: OpenAICompatibleChatModelId,
    config: OpenAICompatibleChatConfig,
  ) {
    this.modelId = modelId;
    this.config = config;
    // initialize error handling:
    const errorStructure =
      config.errorStructure ?? defaultOpenAICompatibleErrorStructure;
    // stream chunk schema embeds the provider's error schema so error
    // events validate alongside data events
    this.chunkSchema = createOpenAICompatibleChatChunkSchema(
      errorStructure.errorSchema,
    );
    this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
    this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false;
  }
  get provider(): string {
    return this.config.provider;
  }
  // provider-options key: the provider id up to the first '.' (e.g. "copilot")
  private get providerOptionsName(): string {
    return this.config.provider.split('.')[0].trim();
  }
  get supportedUrls() {
    return this.config.supportedUrls?.() ?? {};
  }
/**
 * Builds the JSON request body for the chat completions endpoint from
 * standardized AI SDK call options, collecting warnings for settings the
 * provider does not support (topK, schema'd JSON without structured
 * outputs, unsupported tools).
 */
private async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
  topP,
  topK,
  frequencyPenalty,
  presencePenalty,
  providerOptions,
  stopSequences,
  responseFormat,
  seed,
  toolChoice,
  tools,
}: Parameters<LanguageModelV2['doGenerate']>[0]) {
  const warnings: LanguageModelV2CallWarning[] = [];
  // Parse provider options: options under the 'copilot' key are applied
  // first, then overridden by options under the provider's own name.
  const compatibleOptions = Object.assign(
    (await parseProviderOptions({
      provider: 'copilot',
      providerOptions,
      schema: openaiCompatibleProviderOptions,
    })) ?? {},
    (await parseProviderOptions({
      provider: this.providerOptionsName,
      providerOptions,
      schema: openaiCompatibleProviderOptions,
    })) ?? {},
  );
  if (topK != null) {
    warnings.push({ type: 'unsupported-setting', setting: 'topK' });
  }
  if (
    responseFormat?.type === 'json' &&
    responseFormat.schema != null &&
    !this.supportsStructuredOutputs
  ) {
    warnings.push({
      type: 'unsupported-setting',
      setting: 'responseFormat',
      details:
        'JSON response format schema is only supported with structuredOutputs',
    });
  }
  const {
    tools: openaiTools,
    toolChoice: openaiToolChoice,
    toolWarnings,
  } = prepareTools({
    tools,
    toolChoice,
  });
  return {
    args: {
      // model id:
      model: this.modelId,
      // model specific settings:
      user: compatibleOptions.user,
      // standardized settings:
      max_tokens: maxOutputTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      response_format:
        responseFormat?.type === 'json'
          ? this.supportsStructuredOutputs === true &&
            responseFormat.schema != null
            ? {
                type: 'json_schema',
                json_schema: {
                  schema: responseFormat.schema,
                  name: responseFormat.name ?? 'response',
                  description: responseFormat.description,
                },
              }
            : { type: 'json_object' }
          : undefined,
      stop: stopSequences,
      seed,
      // pass through any extra provider options not covered by the schema
      // as raw request fields
      ...Object.fromEntries(
        Object.entries(
          providerOptions?.[this.providerOptionsName] ?? {},
        ).filter(
          ([key]) =>
            !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
        ),
      ),
      reasoning_effort: compatibleOptions.reasoningEffort,
      verbosity: compatibleOptions.textVerbosity,
      // messages:
      messages: convertToOpenAICompatibleChatMessages(prompt),
      // tools:
      tools: openaiTools,
      tool_choice: openaiToolChoice,
      // thinking_budget
      thinking_budget: compatibleOptions.thinking_budget,
    },
    warnings: [...warnings, ...toolWarnings],
  };
}
/**
 * Non-streaming generation via POST /chat/completions.
 * Maps Copilot's `reasoning_text` to an AI SDK reasoning part and
 * attaches `reasoning_opaque` as provider metadata for multi-turn use.
 */
async doGenerate(
  options: Parameters<LanguageModelV2['doGenerate']>[0],
): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>> {
  const { args, warnings } = await this.getArgs({ ...options });
  // stringified copy is only reported back as request metadata
  const body = JSON.stringify(args);
  const {
    responseHeaders,
    value: responseBody,
    rawValue: rawResponse,
  } = await postJsonToApi({
    url: this.config.url({
      path: '/chat/completions',
      modelId: this.modelId,
    }),
    headers: combineHeaders(this.config.headers(), options.headers),
    body: args,
    failedResponseHandler: this.failedResponseHandler,
    successfulResponseHandler: createJsonResponseHandler(
      OpenAICompatibleChatResponseSchema,
    ),
    abortSignal: options.abortSignal,
    fetch: this.config.fetch,
  });
  // only the first choice is surfaced
  const choice = responseBody.choices[0];
  const content: Array<LanguageModelV2Content> = [];
  // text content:
  const text = choice.message.content;
  if (text != null && text.length > 0) {
    content.push({ type: 'text', text });
  }
  // reasoning content (Copilot uses reasoning_text):
  const reasoning = choice.message.reasoning_text;
  if (reasoning != null && reasoning.length > 0) {
    content.push({
      type: 'reasoning',
      text: reasoning,
      // Include reasoning_opaque for Copilot multi-turn reasoning
      providerMetadata: choice.message.reasoning_opaque
        ? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
        : undefined,
    });
  }
  // tool calls:
  if (choice.message.tool_calls != null) {
    for (const toolCall of choice.message.tool_calls) {
      content.push({
        type: 'tool-call',
        toolCallId: toolCall.id ?? generateId(),
        toolName: toolCall.function.name,
        input: toolCall.function.arguments!,
      });
    }
  }
  // provider metadata:
  const providerMetadata: SharedV2ProviderMetadata = {
    [this.providerOptionsName]: {},
    ...(await this.config.metadataExtractor?.extractMetadata?.({
      parsedBody: rawResponse,
    })),
  };
  const completionTokenDetails =
    responseBody.usage?.completion_tokens_details;
  if (completionTokenDetails?.accepted_prediction_tokens != null) {
    providerMetadata[this.providerOptionsName].acceptedPredictionTokens =
      completionTokenDetails?.accepted_prediction_tokens;
  }
  if (completionTokenDetails?.rejected_prediction_tokens != null) {
    providerMetadata[this.providerOptionsName].rejectedPredictionTokens =
      completionTokenDetails?.rejected_prediction_tokens;
  }
  return {
    content,
    finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
    usage: {
      inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
      outputTokens: responseBody.usage?.completion_tokens ?? undefined,
      totalTokens: responseBody.usage?.total_tokens ?? undefined,
      reasoningTokens:
        responseBody.usage?.completion_tokens_details?.reasoning_tokens ??
        undefined,
      cachedInputTokens:
        responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
    },
    providerMetadata,
    request: { body },
    response: {
      ...getResponseMetadata(responseBody),
      headers: responseHeaders,
      body: rawResponse,
    },
    warnings,
  };
}
/**
 * Streaming generation via POST /chat/completions with SSE.
 * Transforms raw provider chunks into LanguageModelV2 stream parts,
 * tracking three pieces of state across chunks: active reasoning
 * (reasoning-0), active text (txt-0), and accumulating tool calls.
 * Copilot's `reasoning_opaque` is captured once per response and
 * attached to the reasoning-end event and the finish metadata.
 */
async doStream(
  options: Parameters<LanguageModelV2['doStream']>[0],
): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>> {
  const { args, warnings } = await this.getArgs({ ...options });
  const body = {
    ...args,
    stream: true,
    // only include stream_options when in strict compatibility mode:
    stream_options: this.config.includeUsage
      ? { include_usage: true }
      : undefined,
  };
  const metadataExtractor =
    this.config.metadataExtractor?.createStreamExtractor();
  const { responseHeaders, value: response } = await postJsonToApi({
    url: this.config.url({
      path: '/chat/completions',
      modelId: this.modelId,
    }),
    headers: combineHeaders(this.config.headers(), options.headers),
    body,
    failedResponseHandler: this.failedResponseHandler,
    successfulResponseHandler: createEventSourceResponseHandler(
      this.chunkSchema,
    ),
    abortSignal: options.abortSignal,
    fetch: this.config.fetch,
  });
  // tool calls indexed by the provider's tool_calls[].index
  const toolCalls: Array<{
    id: string;
    type: 'function';
    function: {
      name: string;
      arguments: string;
    };
    hasFinished: boolean;
  }> = [];
  let finishReason: LanguageModelV2FinishReason = 'unknown';
  // usage accumulator; fields stay undefined until the provider reports them
  const usage: {
    completionTokens: number | undefined;
    completionTokensDetails: {
      reasoningTokens: number | undefined;
      acceptedPredictionTokens: number | undefined;
      rejectedPredictionTokens: number | undefined;
    };
    promptTokens: number | undefined;
    promptTokensDetails: {
      cachedTokens: number | undefined;
    };
    totalTokens: number | undefined;
  } = {
    completionTokens: undefined,
    completionTokensDetails: {
      reasoningTokens: undefined,
      acceptedPredictionTokens: undefined,
      rejectedPredictionTokens: undefined,
    },
    promptTokens: undefined,
    promptTokensDetails: {
      cachedTokens: undefined,
    },
    totalTokens: undefined,
  };
  let isFirstChunk = true;
  const providerOptionsName = this.providerOptionsName;
  let isActiveReasoning = false;
  let isActiveText = false;
  let reasoningOpaque: string | undefined;
  return {
    stream: response.pipeThrough(
      new TransformStream<
        ParseResult<z.infer<typeof this.chunkSchema>>,
        LanguageModelV2StreamPart
      >({
        start(controller) {
          controller.enqueue({ type: 'stream-start', warnings });
        },
        // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
        transform(chunk, controller) {
          // Emit raw chunk if requested (before anything else)
          if (options.includeRawChunks) {
            controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
          }
          // handle failed chunk parsing / validation:
          if (!chunk.success) {
            finishReason = 'error';
            controller.enqueue({ type: 'error', error: chunk.error });
            return;
          }
          const value = chunk.value;
          metadataExtractor?.processChunk(chunk.rawValue);
          // handle error chunks:
          if ('error' in value) {
            finishReason = 'error';
            controller.enqueue({ type: 'error', error: value.error.message });
            return;
          }
          if (isFirstChunk) {
            isFirstChunk = false;
            controller.enqueue({
              type: 'response-metadata',
              ...getResponseMetadata(value),
            });
          }
          if (value.usage != null) {
            const {
              prompt_tokens,
              completion_tokens,
              total_tokens,
              prompt_tokens_details,
              completion_tokens_details,
            } = value.usage;
            usage.promptTokens = prompt_tokens ?? undefined;
            usage.completionTokens = completion_tokens ?? undefined;
            usage.totalTokens = total_tokens ?? undefined;
            if (completion_tokens_details?.reasoning_tokens != null) {
              usage.completionTokensDetails.reasoningTokens =
                completion_tokens_details?.reasoning_tokens;
            }
            if (
              completion_tokens_details?.accepted_prediction_tokens != null
            ) {
              usage.completionTokensDetails.acceptedPredictionTokens =
                completion_tokens_details?.accepted_prediction_tokens;
            }
            if (
              completion_tokens_details?.rejected_prediction_tokens != null
            ) {
              usage.completionTokensDetails.rejectedPredictionTokens =
                completion_tokens_details?.rejected_prediction_tokens;
            }
            if (prompt_tokens_details?.cached_tokens != null) {
              usage.promptTokensDetails.cachedTokens =
                prompt_tokens_details?.cached_tokens;
            }
          }
          const choice = value.choices[0];
          if (choice?.finish_reason != null) {
            finishReason = mapOpenAICompatibleFinishReason(
              choice.finish_reason,
            );
          }
          if (choice?.delta == null) {
            return;
          }
          const delta = choice.delta;
          // Capture reasoning_opaque for Copilot multi-turn reasoning
          if (delta.reasoning_opaque) {
            if (reasoningOpaque != null) {
              throw new InvalidResponseDataError({
                data: delta,
                message:
                  'Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.',
              });
            }
            reasoningOpaque = delta.reasoning_opaque;
          }
          // enqueue reasoning before text deltas (Copilot uses reasoning_text):
          const reasoningContent = delta.reasoning_text;
          if (reasoningContent) {
            if (!isActiveReasoning) {
              controller.enqueue({
                type: 'reasoning-start',
                id: 'reasoning-0',
              });
              isActiveReasoning = true;
            }
            controller.enqueue({
              type: 'reasoning-delta',
              id: 'reasoning-0',
              delta: reasoningContent,
            });
          }
          if (delta.content) {
            // If reasoning was active and we're starting text, end reasoning first
            // This handles the case where reasoning_opaque and content come in the same chunk
            if (isActiveReasoning && !isActiveText) {
              controller.enqueue({
                type: 'reasoning-end',
                id: 'reasoning-0',
                providerMetadata: reasoningOpaque
                  ? { copilot: { reasoningOpaque } }
                  : undefined,
              });
              isActiveReasoning = false;
            }
            if (!isActiveText) {
              controller.enqueue({ type: 'text-start', id: 'txt-0' });
              isActiveText = true;
            }
            controller.enqueue({
              type: 'text-delta',
              id: 'txt-0',
              delta: delta.content,
            });
          }
          if (delta.tool_calls != null) {
            // If reasoning was active and we're starting tool calls, end reasoning first
            // This handles the case where reasoning goes directly to tool calls with no content
            if (isActiveReasoning) {
              controller.enqueue({
                type: 'reasoning-end',
                id: 'reasoning-0',
                providerMetadata: reasoningOpaque
                  ? { copilot: { reasoningOpaque } }
                  : undefined,
              });
              isActiveReasoning = false;
            }
            for (const toolCallDelta of delta.tool_calls) {
              const index = toolCallDelta.index;
              // first chunk for this tool call index must carry id + name
              if (toolCalls[index] == null) {
                if (toolCallDelta.id == null) {
                  throw new InvalidResponseDataError({
                    data: toolCallDelta,
                    message: `Expected 'id' to be a string.`,
                  });
                }
                if (toolCallDelta.function?.name == null) {
                  throw new InvalidResponseDataError({
                    data: toolCallDelta,
                    message: `Expected 'function.name' to be a string.`,
                  });
                }
                controller.enqueue({
                  type: 'tool-input-start',
                  id: toolCallDelta.id,
                  toolName: toolCallDelta.function.name,
                });
                toolCalls[index] = {
                  id: toolCallDelta.id,
                  type: 'function',
                  function: {
                    name: toolCallDelta.function.name,
                    arguments: toolCallDelta.function.arguments ?? '',
                  },
                  hasFinished: false,
                };
                const toolCall = toolCalls[index];
                if (
                  toolCall.function?.name != null &&
                  toolCall.function?.arguments != null
                ) {
                  // send delta if the argument text has already started:
                  if (toolCall.function.arguments.length > 0) {
                    controller.enqueue({
                      type: 'tool-input-delta',
                      id: toolCall.id,
                      delta: toolCall.function.arguments,
                    });
                  }
                  // check if tool call is complete
                  // (some providers send the full tool call in one chunk):
                  if (isParsableJson(toolCall.function.arguments)) {
                    controller.enqueue({
                      type: 'tool-input-end',
                      id: toolCall.id,
                    });
                    controller.enqueue({
                      type: 'tool-call',
                      toolCallId: toolCall.id ?? generateId(),
                      toolName: toolCall.function.name,
                      input: toolCall.function.arguments,
                    });
                    toolCall.hasFinished = true;
                  }
                }
                continue;
              }
              // existing tool call, merge if not finished
              const toolCall = toolCalls[index];
              if (toolCall.hasFinished) {
                continue;
              }
              if (toolCallDelta.function?.arguments != null) {
                toolCall.function!.arguments +=
                  toolCallDelta.function?.arguments ?? '';
              }
              // send delta
              controller.enqueue({
                type: 'tool-input-delta',
                id: toolCall.id,
                delta: toolCallDelta.function.arguments ?? '',
              });
              // check if tool call is complete
              if (
                toolCall.function?.name != null &&
                toolCall.function?.arguments != null &&
                isParsableJson(toolCall.function.arguments)
              ) {
                controller.enqueue({
                  type: 'tool-input-end',
                  id: toolCall.id,
                });
                controller.enqueue({
                  type: 'tool-call',
                  toolCallId: toolCall.id ?? generateId(),
                  toolName: toolCall.function.name,
                  input: toolCall.function.arguments,
                });
                toolCall.hasFinished = true;
              }
            }
          }
        },
        flush(controller) {
          // close any parts left open when the stream ends
          if (isActiveReasoning) {
            controller.enqueue({
              type: 'reasoning-end',
              id: 'reasoning-0',
              // Include reasoning_opaque for Copilot multi-turn reasoning
              providerMetadata: reasoningOpaque
                ? { copilot: { reasoningOpaque } }
                : undefined,
            });
          }
          if (isActiveText) {
            controller.enqueue({ type: 'text-end', id: 'txt-0' });
          }
          // go through all tool calls and send the ones that are not finished
          for (const toolCall of toolCalls.filter(
            toolCall => !toolCall.hasFinished,
          )) {
            controller.enqueue({
              type: 'tool-input-end',
              id: toolCall.id,
            });
            controller.enqueue({
              type: 'tool-call',
              toolCallId: toolCall.id ?? generateId(),
              toolName: toolCall.function.name,
              input: toolCall.function.arguments,
            });
          }
          const providerMetadata: SharedV2ProviderMetadata = {
            [providerOptionsName]: {},
            // Include reasoning_opaque for Copilot multi-turn reasoning
            ...(reasoningOpaque
              ? { copilot: { reasoningOpaque } }
              : {}),
            ...metadataExtractor?.buildMetadata(),
          };
          if (
            usage.completionTokensDetails.acceptedPredictionTokens != null
          ) {
            providerMetadata[providerOptionsName].acceptedPredictionTokens =
              usage.completionTokensDetails.acceptedPredictionTokens;
          }
          if (
            usage.completionTokensDetails.rejectedPredictionTokens != null
          ) {
            providerMetadata[providerOptionsName].rejectedPredictionTokens =
              usage.completionTokensDetails.rejectedPredictionTokens;
          }
          controller.enqueue({
            type: 'finish',
            finishReason,
            usage: {
              inputTokens: usage.promptTokens ?? undefined,
              outputTokens: usage.completionTokens ?? undefined,
              totalTokens: usage.totalTokens ?? undefined,
              reasoningTokens:
                usage.completionTokensDetails.reasoningTokens ?? undefined,
              cachedInputTokens:
                usage.promptTokensDetails.cachedTokens ?? undefined,
            },
            providerMetadata,
          });
        },
      }),
    ),
    request: { body },
    response: { headers: responseHeaders },
  };
}
}
// Token usage block shared by the response and chunk schemas.
// Every field is nullish so partial or missing usage data never fails
// validation.
const openaiCompatibleTokenUsageSchema = z
  .object({
    prompt_tokens: z.number().nullish(),
    completion_tokens: z.number().nullish(),
    total_tokens: z.number().nullish(),
    prompt_tokens_details: z
      .object({
        cached_tokens: z.number().nullish(),
      })
      .nullish(),
    completion_tokens_details: z
      .object({
        reasoning_tokens: z.number().nullish(),
        accepted_prediction_tokens: z.number().nullish(),
        rejected_prediction_tokens: z.number().nullish(),
      })
      .nullish(),
  })
  .nullish();
// Limited version of the schema, focused on what is needed for the implementation.
// This approach limits breakage when the API changes and increases efficiency.
// Non-streaming chat completion response.
const OpenAICompatibleChatResponseSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      message: z.object({
        role: z.literal('assistant').nullish(),
        content: z.string().nullish(),
        // Copilot-specific reasoning fields
        reasoning_text: z.string().nullish(),
        reasoning_opaque: z.string().nullish(),
        tool_calls: z
          .array(
            z.object({
              id: z.string().nullish(),
              function: z.object({
                name: z.string(),
                arguments: z.string(),
              }),
            }),
          )
          .nullish(),
      }),
      finish_reason: z.string().nullish(),
    }),
  ),
  usage: openaiCompatibleTokenUsageSchema,
});
// Limited version of the schema, focused on what is needed for the implementation.
// This approach limits breakage when the API changes and increases efficiency.
// Streaming SSE chunk schema; a chunk is either a data event or a
// provider error (hence the union with the injected error schema).
const createOpenAICompatibleChatChunkSchema = <
  ERROR_SCHEMA extends z.core.$ZodType,
>(
  errorSchema: ERROR_SCHEMA,
) =>
  z.union([
    z.object({
      id: z.string().nullish(),
      created: z.number().nullish(),
      model: z.string().nullish(),
      choices: z.array(
        z.object({
          delta: z
            .object({
              role: z.enum(['assistant']).nullish(),
              content: z.string().nullish(),
              // Copilot-specific reasoning fields
              reasoning_text: z.string().nullish(),
              reasoning_opaque: z.string().nullish(),
              tool_calls: z
                .array(
                  z.object({
                    index: z.number(),
                    id: z.string().nullish(),
                    function: z.object({
                      name: z.string().nullish(),
                      arguments: z.string().nullish(),
                    }),
                  }),
                )
                .nullish(),
            })
            .nullish(),
          finish_reason: z.string().nullish(),
        }),
      ),
      usage: openaiCompatibleTokenUsageSchema,
    }),
    errorSchema,
  ]);

View File

@@ -0,0 +1,30 @@
import { z } from 'zod/v4';
/** Any model id string is accepted; the provider validates it server-side. */
export type OpenAICompatibleChatModelId = string;
/** Provider options accepted under the 'copilot' (or provider-name) key. */
export const openaiCompatibleProviderOptions = z.object({
  /**
   * A unique identifier representing your end-user, which can help the provider to
   * monitor and detect abuse.
   */
  user: z.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: z.string().optional(),
  /**
   * Controls the verbosity of the generated text. Defaults to `medium`.
   */
  textVerbosity: z.string().optional(),
  /**
   * Copilot thinking_budget used for Anthropic models.
   */
  thinking_budget: z.number().optional(),
});
export type OpenAICompatibleProviderOptions = z.infer<
  typeof openaiCompatibleProviderOptions
>;

View File

@@ -0,0 +1,48 @@
import type { SharedV2ProviderMetadata } from '@ai-sdk/provider';
/**
 * Extracts provider-specific metadata from API responses.
 * Used to standardize metadata handling across different LLM providers while allowing
 * provider-specific metadata to be captured.
 * Both hooks are optional for the model to invoke; implementations must be
 * safe to call with arbitrary (unknown-shaped) parsed JSON.
 */
export type MetadataExtractor = {
  /**
   * Extracts provider metadata from a complete, non-streaming response.
   *
   * @param parsedBody - The parsed response JSON body from the provider's API.
   *
   * @returns Provider-specific metadata or undefined if no metadata is available.
   * The metadata should be under a key indicating the provider id.
   */
  extractMetadata: ({
    parsedBody,
  }: {
    parsedBody: unknown;
  }) => Promise<SharedV2ProviderMetadata | undefined>;
  /**
   * Creates an extractor for handling streaming responses. The returned object provides
   * methods to process individual chunks and build the final metadata from the accumulated
   * stream data.
   *
   * @returns An object with methods to process chunks and build metadata from a stream
   */
  createStreamExtractor: () => {
    /**
     * Process an individual chunk from the stream. Called for each chunk in the response stream
     * to accumulate metadata throughout the streaming process.
     *
     * @param parsedChunk - The parsed JSON response chunk from the provider's API
     */
    processChunk(parsedChunk: unknown): void;
    /**
     * Builds the metadata object after all chunks have been processed.
     * Called at the end of the stream to generate the complete provider metadata.
     *
     * @returns Provider-specific metadata or undefined if no metadata is available.
     * The metadata should be under a key indicating the provider id.
     */
    buildMetadata(): SharedV2ProviderMetadata | undefined;
  };
};

View File

@@ -0,0 +1,92 @@
import {
type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning,
UnsupportedFunctionalityError,
} from '@ai-sdk/provider';
/**
 * Maps AI SDK tool definitions and tool choice onto the OpenAI-compatible
 * `tools` / `tool_choice` request fields.
 *
 * Provider-defined tools are not supported by this provider and are
 * reported via `toolWarnings` rather than thrown.
 *
 * @throws UnsupportedFunctionalityError for an unrecognized tool choice type.
 */
export function prepareTools({
  tools,
  toolChoice,
}: {
  tools: LanguageModelV2CallOptions['tools'];
  toolChoice?: LanguageModelV2CallOptions['toolChoice'];
}): {
  tools:
    | undefined
    | Array<{
        type: 'function';
        function: {
          name: string;
          description: string | undefined;
          parameters: unknown;
        };
      }>;
  toolChoice:
    | { type: 'function'; function: { name: string } }
    | 'auto'
    | 'none'
    | 'required'
    | undefined;
  toolWarnings: LanguageModelV2CallWarning[];
} {
  const toolWarnings: LanguageModelV2CallWarning[] = [];
  // An empty tools array is treated the same as no tools at all,
  // which avoids sending an empty list to the provider.
  if (tools == null || tools.length === 0) {
    return { tools: undefined, toolChoice: undefined, toolWarnings };
  }
  const openaiCompatTools: Array<{
    type: 'function';
    function: {
      name: string;
      description: string | undefined;
      parameters: unknown;
    };
  }> = [];
  for (const tool of tools) {
    if (tool.type === 'provider-defined') {
      toolWarnings.push({ type: 'unsupported-tool', tool });
      continue;
    }
    openaiCompatTools.push({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.inputSchema,
      },
    });
  }
  if (toolChoice == null) {
    return { tools: openaiCompatTools, toolChoice: undefined, toolWarnings };
  }
  const choiceType = toolChoice.type;
  switch (choiceType) {
    case 'auto':
    case 'none':
    case 'required':
      // these pass through unchanged as plain strings
      return { tools: openaiCompatTools, toolChoice: choiceType, toolWarnings };
    case 'tool':
      // a specific tool is selected by name
      return {
        tools: openaiCompatTools,
        toolChoice: {
          type: 'function',
          function: { name: toolChoice.toolName },
        },
        toolWarnings,
      };
    default: {
      // exhaustiveness guard: fails to compile if a new choice type is added
      const _exhaustiveCheck: never = choiceType;
      throw new UnsupportedFunctionalityError({
        functionality: `tool choice type: ${_exhaustiveCheck}`,
      });
    }
  }
}

View File

@@ -1,6 +1,6 @@
import type { LanguageModelV2 } from "@ai-sdk/provider"
import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible"
import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
import { OpenAICompatibleChatLanguageModel } from "./chat/openai-compatible-chat-language-model"
import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"
// Import the version or define it

View File

@@ -0,0 +1,2 @@
export { createOpenaiCompatible, openaiCompatible } from "./copilot-provider"
export type { OpenaiCompatibleProvider, OpenaiCompatibleProviderSettings } from "./copilot-provider"

View File

@@ -0,0 +1,30 @@
import { z, type ZodType } from 'zod/v4';
/** Wire shape of an OpenAI-compatible error payload. */
export const openaiCompatibleErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
});
export type OpenAICompatibleErrorData = z.infer<
  typeof openaiCompatibleErrorDataSchema
>;
/** Pluggable error handling: schema, message mapping, and retry policy. */
export type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
  isRetryable?: (response: Response, error?: T) => boolean;
};
/** Default error handling used when a provider supplies no override. */
export const defaultOpenAICompatibleErrorStructure: ProviderErrorStructure<OpenAICompatibleErrorData> =
  {
    errorSchema: openaiCompatibleErrorDataSchema,
    errorToMessage: data => data.error.message,
  };

View File

@@ -183,7 +183,7 @@ export async function convertToOpenAIResponsesInput({
case "reasoning": {
const providerOptions = await parseProviderOptions({
provider: "openai",
provider: "copilot",
providerOptions: part.providerOptions,
schema: openaiResponsesReasoningProviderOptionsSchema,
})

View File

@@ -194,7 +194,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
}
const openaiOptions = await parseProviderOptions({
provider: "openai",
provider: "copilot",
providerOptions,
schema: openaiResponsesProviderOptionsSchema,
})

View File

@@ -1,2 +0,0 @@
export { createOpenaiCompatible, openaiCompatible } from "./openai-compatible-provider"
export type { OpenaiCompatibleProvider, OpenaiCompatibleProviderSettings } from "./openai-compatible-provider"

View File

@@ -20,6 +20,7 @@ export namespace ProviderTransform {
function sdkKey(npm: string): string | undefined {
switch (npm) {
case "@ai-sdk/github-copilot":
return "copilot"
case "@ai-sdk/openai":
case "@ai-sdk/azure":
return "openai"
@@ -179,6 +180,9 @@ export namespace ProviderTransform {
openaiCompatible: {
cache_control: { type: "ephemeral" },
},
copilot: {
copilot_cache_control: { type: "ephemeral" },
},
}
for (const msg of unique([...system, ...final])) {
@@ -353,6 +357,15 @@ export namespace ProviderTransform {
return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
case "@ai-sdk/github-copilot":
if (model.id.includes("gemini")) {
// currently github copilot only returns thinking
return {}
}
if (model.id.includes("claude")) {
return {
thinking: { thinking_budget: 4000 },
}
}
const copilotEfforts = iife(() => {
if (id.includes("5.1-codex-max") || id.includes("5.2")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
return WIDELY_SUPPORTED_EFFORTS

View File

@@ -148,14 +148,15 @@ export namespace LLM {
},
)
const maxOutputTokens = isCodex
? undefined
: ProviderTransform.maxOutputTokens(
input.model.api.npm,
params.options,
input.model.limit.output,
OUTPUT_TOKEN_MAX,
)
const maxOutputTokens =
isCodex || provider.id.includes("github-copilot")
? undefined
: ProviderTransform.maxOutputTokens(
input.model.api.npm,
params.options,
input.model.limit.output,
OUTPUT_TOKEN_MAX,
)
const tools = await resolveTools(input)