Mirror of https://gitea.toothfairyai.com/ToothFairyAI/tf_code.git (synced 2026-04-04 16:13:11 +00:00)
feat: add litellmProxy provider option for explicit LiteLLM compatibility (#8658)
Co-authored-by: Mark Henderson <Mark.Henderson99@hotmail.com>
Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
@@ -10,6 +10,8 @@ import {
   type Tool,
   type ToolSet,
   extractReasoningMiddleware,
+  tool,
+  jsonSchema,
 } from "ai"
 import { clone, mergeDeep, pipe } from "remeda"
 import { ProviderTransform } from "@/provider/transform"
@@ -140,6 +142,26 @@ export namespace LLM {

     const tools = await resolveTools(input)

+    // LiteLLM and some Anthropic proxies require the tools parameter to be present
+    // when message history contains tool calls, even if no tools are being used.
+    // Add a dummy tool that is never called to satisfy this validation.
+    // This is enabled for:
+    // 1. Providers with "litellm" in their ID or API ID (auto-detected)
+    // 2. Providers with explicit "litellmProxy: true" option (opt-in for custom gateways)
+    const isLiteLLMProxy =
+      provider.options?.["litellmProxy"] === true ||
+      input.model.providerID.toLowerCase().includes("litellm") ||
+      input.model.api.id.toLowerCase().includes("litellm")
+
+    if (isLiteLLMProxy && Object.keys(tools).length === 0 && hasToolCalls(input.messages)) {
+      tools["_noop"] = tool({
+        description:
+          "Placeholder for LiteLLM/Anthropic proxy compatibility - required when message history contains tool calls but no active tools are needed",
+        inputSchema: jsonSchema({ type: "object", properties: {} }),
+        execute: async () => ({ output: "", title: "", metadata: {} }),
+      })
+    }
+
     return streamText({
       onError(error) {
         l.error("stream error", {
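
As a minimal sketch of the new opt-in path (the provider entry below is hypothetical; only the litellmProxy key is read by the check added above):

// Hypothetical provider entry, not part of this diff.
const provider = {
  id: "my-internal-gateway", // no "litellm" in the ID, so name-based auto-detection would not fire
  options: {
    litellmProxy: true, // explicit opt-in for a custom LiteLLM-style gateway
  },
}

// With this entry, provider.options?.["litellmProxy"] === true, so isLiteLLMProxy
// is true even though neither the provider ID nor the API ID contains "litellm".
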
@@ -171,7 +193,7 @@ export namespace LLM {
       topP: params.topP,
       topK: params.topK,
       providerOptions: ProviderTransform.providerOptions(input.model, params.options),
-      activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
+      activeTools: Object.keys(tools).filter((x) => x !== "invalid" && x !== "_noop"),
       tools,
       maxOutputTokens,
       abortSignal: input.abort,
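
Note that "_noop", like the existing "invalid" entry, is filtered out of activeTools so the model can never select the placeholder, while the tool itself remains defined in tools for the proxy-side validation described above.
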
@@ -238,4 +260,16 @@ export namespace LLM {
     }
     return input.tools
   }
+
+  // Check if messages contain any tool-call content
+  // Used to determine if a dummy tool should be added for LiteLLM proxy compatibility
+  export function hasToolCalls(messages: ModelMessage[]): boolean {
+    for (const msg of messages) {
+      if (!Array.isArray(msg.content)) continue
+      for (const part of msg.content) {
+        if (part.type === "tool-call" || part.type === "tool-result") return true
+      }
+    }
+    return false
+  }
 }
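
A small usage sketch for the new helper, assuming the AI SDK v5 ModelMessage shape for the tool-call part (the part field names below are illustrative; hasToolCalls only inspects part.type):

import type { ModelMessage } from "ai"

// Illustrative history: the assistant issued a tool call earlier, but the
// current request resolves no active tools.
const history: ModelMessage[] = [
  { role: "user", content: "What is the weather in Berlin?" },
  {
    role: "assistant",
    content: [{ type: "tool-call", toolCallId: "call_1", toolName: "weather", input: { city: "Berlin" } }],
  },
]

// hasToolCalls(history) returns true, so for a LiteLLM-style proxy with an
// otherwise empty tool set the "_noop" placeholder would be injected.
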
@@ -533,6 +533,17 @@ export namespace MessageV2 {
             errorText: part.state.error,
             callProviderMetadata: part.metadata,
           })
+        // Handle pending/running tool calls to prevent dangling tool_use blocks
+        // Anthropic/Claude APIs require every tool_use to have a corresponding tool_result
+        if (part.state.status === "pending" || part.state.status === "running")
+          assistantMessage.parts.push({
+            type: ("tool-" + part.tool) as `tool-${string}`,
+            state: "output-error",
+            toolCallId: part.callID,
+            input: part.state.input,
+            errorText: "[Tool execution was interrupted]",
+            callProviderMetadata: part.metadata,
+          })
       }
       if (part.type === "reasoning") {
         assistantMessage.parts.push({
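
A standalone sketch of the repair this hunk performs, using simplified part shapes (the field names follow the diff; the surrounding values and types are assumptions):

// A tool call that was still "running" when the message is converted...
const interrupted = {
  tool: "bash",
  callID: "call_42",
  state: { status: "running", input: { command: "ls" } },
  metadata: undefined,
}

// ...is emitted as a synthetic error result, so the assistant message never
// carries a tool_use block without a matching tool_result.
const repaired = {
  type: ("tool-" + interrupted.tool) as `tool-${string}`,
  state: "output-error",
  toolCallId: interrupted.callID,
  input: interrupted.state.input,
  errorText: "[Tool execution was interrupted]",
  callProviderMetadata: interrupted.metadata,
}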