feat: add Gemini API compatibility, refactor stream rendering, and enhance tool call handling and configuration options

This commit is contained in:
CJACK
2026-02-22 20:53:42 +08:00
parent ae7dce0b32
commit a9403c5392
21 changed files with 581 additions and 297 deletions

View File

@@ -1,113 +0,0 @@
package util
// BuildOpenAIChatStreamDeltaChoice wraps a streaming delta into an OpenAI
// chat-completion choice object. It is kept for backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildChatStreamDeltaChoice for new code.
func BuildOpenAIChatStreamDeltaChoice(index int, delta map[string]any) map[string]any {
	choice := make(map[string]any, 2)
	choice["index"] = index
	choice["delta"] = delta
	return choice
}
// BuildOpenAIChatStreamFinishChoice builds the terminal streaming choice for a
// chat completion: an empty delta plus the finish reason. It is kept for
// backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildChatStreamFinishChoice for new code.
func BuildOpenAIChatStreamFinishChoice(index int, finishReason string) map[string]any {
	choice := make(map[string]any, 3)
	choice["index"] = index
	// The OpenAI stream protocol sends an empty delta alongside finish_reason.
	choice["delta"] = map[string]any{}
	choice["finish_reason"] = finishReason
	return choice
}
// BuildOpenAIChatStreamChunk assembles a full `chat.completion.chunk` SSE
// payload from its parts. The usage block is attached only when non-empty.
// It is kept for backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildChatStreamChunk for new code.
func BuildOpenAIChatStreamChunk(completionID string, created int64, model string, choices []map[string]any, usage map[string]any) map[string]any {
	chunk := map[string]any{
		"object":  "chat.completion.chunk",
		"id":      completionID,
		"model":   model,
		"created": created,
		"choices": choices,
	}
	// Omit "usage" entirely for nil/empty maps so intermediate chunks stay lean.
	if len(usage) != 0 {
		chunk["usage"] = usage
	}
	return chunk
}
// BuildOpenAIChatUsage estimates token counts for the prompt, the reasoning
// ("thinking") text, and the answer text, and formats them as an OpenAI usage
// object. Reasoning tokens are folded into completion_tokens and also reported
// separately under completion_tokens_details. It is kept for backward
// compatibility.
//
// Deprecated: prefer internal/format/openai.BuildChatUsage for new code.
func BuildOpenAIChatUsage(finalPrompt, finalThinking, finalText string) map[string]any {
	var (
		prompt    = EstimateTokens(finalPrompt)
		reasoning = EstimateTokens(finalThinking)
		answer    = EstimateTokens(finalText)
	)
	return map[string]any{
		"prompt_tokens":     prompt,
		"completion_tokens": reasoning + answer,
		"total_tokens":      prompt + reasoning + answer,
		"completion_tokens_details": map[string]any{
			"reasoning_tokens": reasoning,
		},
	}
}
// BuildOpenAIResponsesCreatedPayload builds the `response.created` event
// payload for the Responses streaming API. It is kept for backward
// compatibility.
//
// Deprecated: prefer internal/format/openai.BuildResponsesCreatedPayload for new code.
func BuildOpenAIResponsesCreatedPayload(responseID, model string) map[string]any {
	payload := make(map[string]any, 5)
	payload["type"] = "response.created"
	payload["id"] = responseID
	payload["object"] = "response"
	payload["model"] = model
	// A freshly created response always starts in the in_progress state.
	payload["status"] = "in_progress"
	return payload
}
// BuildOpenAIResponsesTextDeltaPayload builds a `response.output_text.delta`
// streaming event carrying one increment of output text. It is kept for
// backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildResponsesTextDeltaPayload for new code.
func BuildOpenAIResponsesTextDeltaPayload(responseID, delta string) map[string]any {
	payload := make(map[string]any, 3)
	payload["type"] = "response.output_text.delta"
	payload["id"] = responseID
	payload["delta"] = delta
	return payload
}
// BuildOpenAIResponsesReasoningDeltaPayload builds a `response.reasoning.delta`
// streaming event carrying one increment of reasoning text. It is kept for
// backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildResponsesReasoningDeltaPayload for new code.
func BuildOpenAIResponsesReasoningDeltaPayload(responseID, delta string) map[string]any {
	payload := make(map[string]any, 3)
	payload["type"] = "response.reasoning.delta"
	payload["id"] = responseID
	payload["delta"] = delta
	return payload
}
// BuildOpenAIResponsesToolCallDeltaPayload builds a
// `response.output_tool_call.delta` streaming event carrying partial tool-call
// data. It is kept for backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildResponsesToolCallDeltaPayload for new code.
func BuildOpenAIResponsesToolCallDeltaPayload(responseID string, toolCalls []map[string]any) map[string]any {
	payload := make(map[string]any, 3)
	payload["type"] = "response.output_tool_call.delta"
	payload["id"] = responseID
	payload["tool_calls"] = toolCalls
	return payload
}
// BuildOpenAIResponsesToolCallDonePayload builds a
// `response.output_tool_call.done` streaming event carrying the finalized
// tool calls. It is kept for backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildResponsesToolCallDonePayload for new code.
func BuildOpenAIResponsesToolCallDonePayload(responseID string, toolCalls []map[string]any) map[string]any {
	payload := make(map[string]any, 3)
	payload["type"] = "response.output_tool_call.done"
	payload["id"] = responseID
	payload["tool_calls"] = toolCalls
	return payload
}
// BuildOpenAIResponsesCompletedPayload wraps the final response object in a
// `response.completed` streaming event. It is kept for backward compatibility.
//
// Deprecated: prefer internal/format/openai.BuildResponsesCompletedPayload for new code.
func BuildOpenAIResponsesCompletedPayload(response map[string]any) map[string]any {
	payload := make(map[string]any, 2)
	payload["type"] = "response.completed"
	payload["response"] = response
	return payload
}

View File

@@ -1,48 +0,0 @@
package util
import "testing"
func TestBuildOpenAIChatStreamChunk(t *testing.T) {
chunk := BuildOpenAIChatStreamChunk(
"cid",
123,
"deepseek-chat",
[]map[string]any{BuildOpenAIChatStreamDeltaChoice(0, map[string]any{"role": "assistant"})},
nil,
)
if chunk["object"] != "chat.completion.chunk" {
t.Fatalf("unexpected object: %#v", chunk["object"])
}
choices, _ := chunk["choices"].([]map[string]any)
if len(choices) == 0 {
rawChoices, _ := chunk["choices"].([]any)
if len(rawChoices) == 0 {
t.Fatalf("expected choices")
}
}
}
// TestBuildOpenAIChatUsage verifies the usage object exposes the token fields
// that OpenAI clients read.
func TestBuildOpenAIChatUsage(t *testing.T) {
	usage := BuildOpenAIChatUsage("prompt", "think", "answer")
	for _, key := range []string{"prompt_tokens", "completion_tokens_details"} {
		if _, ok := usage[key]; !ok {
			t.Fatalf("expected %s", key)
		}
	}
}
// TestBuildOpenAIResponsesEventPayloads checks the "type" discriminator of
// each Responses streaming event builder.
func TestBuildOpenAIResponsesEventPayloads(t *testing.T) {
	cases := []struct {
		payload map[string]any
		want    string
	}{
		{BuildOpenAIResponsesCreatedPayload("resp_1", "gpt-4o"), "response.created"},
		{BuildOpenAIResponsesToolCallDonePayload("resp_1", []map[string]any{{"index": 0}}), "response.output_tool_call.done"},
		{BuildOpenAIResponsesCompletedPayload(map[string]any{"id": "resp_1"}), "response.completed"},
	}
	for _, c := range cases {
		if c.payload["type"] != c.want {
			t.Fatalf("unexpected type: %#v", c.payload["type"])
		}
	}
}

View File

@@ -92,17 +92,29 @@ func filterToolCallsDetailed(parsed []ParsedToolCall, availableToolNames []strin
for _, name := range availableToolNames {
allowed[name] = struct{}{}
}
if len(allowed) == 0 {
rejectedSet := map[string]struct{}{}
for _, tc := range parsed {
if tc.Name == "" {
continue
}
rejectedSet[tc.Name] = struct{}{}
}
rejected := make([]string, 0, len(rejectedSet))
for name := range rejectedSet {
rejected = append(rejected, name)
}
return nil, rejected
}
out := make([]ParsedToolCall, 0, len(parsed))
rejectedSet := map[string]struct{}{}
for _, tc := range parsed {
if tc.Name == "" {
continue
}
if len(allowed) > 0 {
if _, ok := allowed[tc.Name]; !ok {
rejectedSet[tc.Name] = struct{}{}
continue
}
if _, ok := allowed[tc.Name]; !ok {
rejectedSet[tc.Name] = struct{}{}
continue
}
if tc.Input == nil {
tc.Input = map[string]any{}

View File

@@ -60,6 +60,20 @@ func TestParseToolCallsDetailedMarksPolicyRejection(t *testing.T) {
}
}
// TestParseToolCallsDetailedRejectsWhenAllowListEmpty verifies that a nil
// allow-list marks recognized tool-call syntax as rejected by policy and
// yields no calls.
func TestParseToolCallsDetailedRejectsWhenAllowListEmpty(t *testing.T) {
	const text = `{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
	res := ParseToolCallsDetailed(text, nil)
	switch {
	case !res.SawToolCallSyntax:
		t.Fatalf("expected SawToolCallSyntax=true, got %#v", res)
	case !res.RejectedByPolicy:
		t.Fatalf("expected RejectedByPolicy=true, got %#v", res)
	case len(res.Calls) != 0:
		t.Fatalf("expected no calls when allow-list is empty, got %#v", res.Calls)
	}
}
func TestFormatOpenAIToolCalls(t *testing.T) {
formatted := FormatOpenAIToolCalls([]ParsedToolCall{{Name: "search", Input: map[string]any{"q": "x"}}})
if len(formatted) != 1 {

View File

@@ -364,8 +364,8 @@ func TestFormatOpenAIStreamToolCalls(t *testing.T) {
func TestParseToolCallsNoToolNames(t *testing.T) {
text := `{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
calls := ParseToolCalls(text, nil)
if len(calls) != 1 {
t.Fatalf("expected 1 call with nil tool names, got %d", len(calls))
if len(calls) != 0 {
t.Fatalf("expected 0 call with nil tool names, got %d", len(calls))
}
}