Compare commits

...

19 Commits

Author SHA1 Message Date
CJACK.
89eaf048c3 Merge pull request #221 from CJackHwang/dev
Dev
2026-04-06 16:50:00 +08:00
CJACK.
904211469a Merge pull request #222 from CJackHwang/codex/resolve-pull-request-issues-and-complete-tests
Add golangci-lint bootstrap and CI lint gate; update docs and .gitignore
2026-04-06 13:56:05 +08:00
CJACK.
530872ff2f Merge pull request #224 from CJackHwang/codex/fix-four-issues-from-pull-requests-99aozg
Fix lint bootstrap behavior and harden SSE token replay parsing
2026-04-06 13:55:30 +08:00
CJACK.
fbe1e25c7b Merge pull request #225 from CJackHwang/codex/fix-golangci-lint-bootstrap-compatibility
Treat missing golangci-lint as bootstrap-compatible
2026-04-06 13:54:50 +08:00
CJACK.
cd7e03d936 Merge pull request #226 from CJackHwang/codex/fix-issue-with-passing-thresholds
Handle deferred Close errors, normalize error messages, and add nolint annotations
2026-04-06 13:54:14 +08:00
CJACK.
37fb758191 Make full quality gates pass across repository 2026-04-06 13:41:58 +08:00
CJACK.
fb6be8a8ee fix lint bootstrap on missing golangci-lint 2026-04-06 13:38:17 +08:00
CJACK.
57114a36f5 fix: address codex review issues for lint bootstrap and token replay 2026-04-06 13:12:36 +08:00
CJACK.
a671d82759 chore: auto-bootstrap golangci-lint for Go 1.26 compatibility 2026-04-06 12:52:56 +08:00
CJACK.
da75ed6966 Merge pull request #220 from CJackHwang/codex/fix-pull-request-review-comments
Migrate and reorganize .golangci.yml to v2 with updated linters and exclusions
2026-04-06 12:33:51 +08:00
CJACK.
3b99d2edbe docs: add full-sample token replay command and report fields 2026-04-06 12:32:31 +08:00
CJACK.
f6c09ebd63 fix: keep node error-branch token semantics and add grep fallback 2026-04-06 12:32:26 +08:00
CJACK.
36af2e00f6 Merge pull request #219 from CJackHwang/dev
Dev
2026-04-06 11:17:39 +08:00
CJACK.
9e0fd83a76 test: validate raw stream token replay and enforce gofmt in lint script 2026-04-06 11:15:08 +08:00
CJACK.
a8c160b05d fix: parse DeepSeek accumulated_token_usage robustly and stabilize lint 2026-04-06 11:14:48 +08:00
CJACK.
89ca57122c fix: migrate golangci config to v2 schema 2026-04-06 09:29:22 +08:00
CJACK
6b6ce3eea8 refactor: move toolcall utility files to internal/toolcall directory 2026-04-06 03:56:42 +08:00
CJACK
870144de17 ci: remove golangci-lint step from quality gates workflow 2026-04-06 03:53:03 +08:00
CJACK
1530246e4f refactor: move tool call parsing and formatting logic to a dedicated internal/toolcall package 2026-04-06 03:19:18 +08:00
87 changed files with 760 additions and 263 deletions

View File

@@ -28,6 +28,16 @@ jobs:
cache: "npm"
cache-dependency-path: webui/package-lock.json
- name: Setup golangci-lint
uses: golangci/golangci-lint-action@v8
with:
version: v2.11.4
install-mode: binary
verify: true
- name: Go Format & Lint Gates
run: ./scripts/lint.sh
- name: Refactor Line Gate
run: ./tests/scripts/check-refactor-line-gate.sh

3
.gitignore vendored
View File

@@ -59,3 +59,6 @@ Thumbs.db
# Claude Code
.claude/
CLAUDE.local.md
# Local tool bootstrap cache
.tmp/

73
.golangci.yml Normal file
View File

@@ -0,0 +1,73 @@
version: "2"
run:
tests: true
linters:
default: standard
enable:
- errcheck
- govet
- ineffassign
- staticcheck
- unused
settings:
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- wrapperFunc
- rangeValCopy
- hugeParam
gocyclo:
min-complexity: 15
lll:
line-length: 140
misspell:
locale: US
nakedret:
max-func-lines: 30
prealloc:
simple: true
range-loops: true
for-loops: false
exclusions:
generated: lax
rules:
- path: (.+)\.go$
text: "ST1000: at least one file in a package should have a package comment"
paths:
- third_party$
- builtin$
- examples$
- vendor$
- webui/node_modules$
issues:
max-issues-per-linter: 0
max-same-issues: 0
formatters:
enable:
- gofmt
settings:
goimports:
local-prefixes:
- ds2api
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
- vendor$
- webui/node_modules$

View File

@@ -81,7 +81,7 @@ flowchart LR
- **统一路由内核**:所有协议入口统一汇聚到 `internal/server/router.go`,并在同一路由树中注册 OpenAI / Claude / Gemini / Admin / WebUI 路由,避免多入口行为漂移。
- **统一执行链路**:Claude / Gemini 入口先经 `internal/translatorcliproxy` 做协议转换,再进入 `openai.ChatCompletions` 统一处理工具调用与流式语义,最后再转换回原协议响应。
- **适配器分层更清晰**:`internal/adapter/{claude,gemini}` 负责入口/出口协议封装,`internal/adapter/openai` 负责核心执行,DeepSeek 侧调用只保留在 OpenAI 内核中。
- **Tool Calling 双运行时对齐**:Go 侧(`internal/util`)与 Vercel Node 侧(`internal/js/helpers/stream-tool-sieve`)保持一致的解析/防泄漏语义,覆盖 JSON / XML / invoke / text-kv 多风格输入。
- **Tool Calling 双运行时对齐**:Go 侧(`internal/toolcall`)与 Vercel Node 侧(`internal/js/helpers/stream-tool-sieve`)保持一致的解析/防泄漏语义,覆盖 JSON / XML / invoke / text-kv 多风格输入。
- **配置与运行时设置解耦**:静态配置(`config`)与运行时策略(`settings`)通过 Admin API 分离管理,支持热更新和密码轮换失效旧 JWT。
- **流式能力升级**:`/v1/responses` 与 `/v1/chat/completions` 共享更一致的工具调用增量输出策略,降低不同 SDK 下的行为差异。
- **可观测与可运维增强**:`/healthz`、`/readyz`、`/admin/version`、`/admin/dev/captures` 形成排障闭环,便于发布后验证。
@@ -466,7 +466,8 @@ ds2api/
│ ├── stream/ # 统一流式消费引擎
│ ├── testsuite/ # 端到端测试框架与用例编排
│ ├── translatorcliproxy/ # CLIProxy 桥接与流写入组件
│ ├── util/ # 通用工具函数
│ ├── toolcall/ # Tool Call 解析、修复与格式化(核心业务逻辑)
│ ├── util/ # 通用工具函数(Token 估算、JSON 辅助等)
│ ├── version/ # 版本解析 / 比较与 tag 规范化
│ └── webui/ # WebUI 静态文件托管与自动构建
├── webui/ # React WebUI 源码(Vite + Tailwind)
@@ -543,7 +544,7 @@ npm ci --prefix webui && npm run build --prefix webui
go test ./...
# 运行 tool calls 相关测试(调试工具调用问题)
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/util/
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/toolcall/
# 运行端到端测试
./tests/scripts/run-live.sh

View File

@@ -81,7 +81,7 @@ flowchart LR
- **Unified routing core**: all protocol entries are now centralized through `internal/server/router.go`, with OpenAI / Claude / Gemini / Admin / WebUI routes registered in one tree to avoid multi-entry drift.
- **Unified execution chain**: Claude/Gemini entries are translated by `internal/translatorcliproxy`, then executed through `openai.ChatCompletions` for shared tool-calling and stream semantics, then translated back to the client protocol.
- **Cleaner adapter boundaries**: `internal/adapter/{claude,gemini}` handles protocol wrappers, while `internal/adapter/openai` remains the execution core; upstream DeepSeek calls are retained only in the OpenAI core.
- **Tool-calling parity across runtimes**: Go (`internal/util`) and Vercel Node (`internal/js/helpers/stream-tool-sieve`) follow aligned parsing/anti-leak semantics across JSON / XML / invoke / text-kv inputs.
- **Tool-calling parity across runtimes**: Go (`internal/toolcall`) and Vercel Node (`internal/js/helpers/stream-tool-sieve`) follow aligned parsing/anti-leak semantics across JSON / XML / invoke / text-kv inputs.
- **Config/runtime separation**: static config (`config`) and runtime policy (`settings`) are managed independently via Admin APIs, enabling hot updates and password rotation with JWT invalidation.
- **Streaming behavior upgrade**: `/v1/responses` and `/v1/chat/completions` now share a more consistent incremental tool-call emission strategy across SDK ecosystems.
- **Improved operability**: `/healthz`, `/readyz`, `/admin/version`, and `/admin/dev/captures` form a tighter post-deploy diagnostics loop.
@@ -464,7 +464,8 @@ ds2api/
│ ├── stream/ # Unified stream consumption engine
│ ├── testsuite/ # End-to-end testsuite framework and case orchestration
│ ├── translatorcliproxy/ # CLIProxy bridge and stream writer components
│ ├── util/ # Common utilities
│ ├── toolcall/ # Tool Call parsing, repair, and formatting (core business logic)
│ ├── util/ # Common utilities (Token estimation, JSON helpers, etc.)
│ ├── version/ # Version parsing/comparison and tag normalization
│ └── webui/ # WebUI static file serving and auto-build
├── webui/ # React WebUI source (Vite + Tailwind)

View File

@@ -1 +1 @@
3.1.1
3.1.2

View File

@@ -30,8 +30,8 @@ func main() {
opts.Timeout = time.Duration(timeoutSeconds) * time.Second
if err := testsuite.Run(context.Background(), opts); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
_, _ = fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Fprintln(os.Stdout, "testsuite completed successfully")
_, _ = fmt.Fprintln(os.Stdout, "testsuite completed successfully")
}

View File

@@ -59,7 +59,7 @@ docker-compose -f docker-compose.dev.yml up
| Language | Standards |
| --- | --- |
| **Go** | Run `gofmt` and ensure `go test ./...` passes before committing |
| **Go** | Run `./scripts/lint.sh` (gofmt + golangci-lint) and ensure `go test ./...` passes before committing |
| **JavaScript/React** | Follow existing project style (functional components) |
| **Commit messages** | Use semantic prefixes: `feat:`, `fix:`, `docs:`, `refactor:`, `style:`, `perf:`, `chore:` |

View File

@@ -59,7 +59,7 @@ docker-compose -f docker-compose.dev.yml up
| 语言 | 规范 |
| --- | --- |
| **Go** | 提交前运行 `gofmt`,确保 `go test ./...` 通过 |
| **Go** | 提交前运行 `./scripts/lint.sh`(包含 gofmt+golangci-lint),确保 `go test ./...` 通过 |
| **JavaScript/React** | 保持现有代码风格(函数组件) |
| **提交信息** | 使用语义化前缀:`feat:``fix:``docs:``refactor:``style:``perf:``chore:` |

View File

@@ -180,10 +180,10 @@ go test ./...
```bash
# 运行 tool calls 相关测试(推荐用于调试 tool call 解析问题)
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/util/
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/toolcall/
# 运行单个测试用例
go test -v -run TestParseToolCallsWithDeepSeekHallucination ./internal/util/
go test -v -run TestParseToolCallsWithDeepSeekHallucination ./internal/toolcall/
# 运行 format 相关测试
go test -v ./internal/format/...
@@ -198,13 +198,13 @@ go test -v ./internal/adapter/openai/...
```bash
# 1. 运行 tool calls 相关的所有测试
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/util/
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/toolcall/
# 2. 查看测试输出中的详细调试信息
go test -v -run TestParseToolCallsWithDeepSeekHallucination ./internal/util/ 2>&1
go test -v -run TestParseToolCallsWithDeepSeekHallucination ./internal/toolcall/ 2>&1
# 3. 检查具体测试用例的修复效果
# 测试用例位于 internal/util/toolcalls_test.go,包含:
# 测试用例位于 internal/toolcall/toolcalls_test.go,包含:
# - TestParseToolCallsWithDeepSeekHallucination: DeepSeek 典型幻觉输出
# - TestRepairLooseJSONWithNestedObjects: 嵌套对象的方括号修复
# - TestParseToolCallsWithMixedWindowsPaths: Windows 路径处理

View File

@@ -64,7 +64,7 @@ func (h *Handler) proxyViaOpenAI(w http.ResponseWriter, r *http.Request, store C
rec := httptest.NewRecorder()
h.OpenAI.ChatCompletions(rec, proxyReq)
res := rec.Result()
defer res.Body.Close()
defer func() { _ = res.Body.Close() }()
body, _ := io.ReadAll(res.Body)
for k, vv := range res.Header {
for _, v := range vv {
@@ -94,7 +94,7 @@ func (h *Handler) proxyViaOpenAI(w http.ResponseWriter, r *http.Request, store C
rec := httptest.NewRecorder()
h.OpenAI.ChatCompletions(rec, proxyReq)
res := rec.Result()
defer res.Body.Close()
defer func() { _ = res.Body.Close() }()
body, _ := io.ReadAll(res.Body)
if res.StatusCode < 200 || res.StatusCode >= 300 {
for k, vv := range res.Header {
@@ -124,7 +124,7 @@ func (h *Handler) proxyViaOpenAI(w http.ResponseWriter, r *http.Request, store C
}
func (h *Handler) handleClaudeStreamRealtime(w http.ResponseWriter, r *http.Request, resp *http.Response, model string, messages []any, thinkingEnabled, searchEnabled bool, toolNames []string) {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
writeClaudeError(w, http.StatusInternalServerError, string(body))

View File

@@ -1,12 +1,12 @@
package claude
import (
"ds2api/internal/toolcall"
"encoding/json"
"fmt"
"strings"
"ds2api/internal/prompt"
"ds2api/internal/util"
)
func normalizeClaudeMessages(messages []any) []any {
@@ -98,9 +98,10 @@ func buildClaudeToolPrompt(tools []any) string {
}
return "You have access to these tools:\n\n" +
strings.Join(toolSchemas, "\n\n") + "\n\n" +
util.BuildToolCallInstructions(names)
toolcall.BuildToolCallInstructions(names)
}
//nolint:unused // retained for compatibility with pending Claude tool-result prompt flow.
func formatClaudeToolResultForPrompt(block map[string]any) string {
if block == nil {
return ""

View File

@@ -96,6 +96,7 @@ func looksLikeBase64Payload(v string) bool {
return true
}
//nolint:unused // helper kept for compatibility with upcoming sanitize pipeline.
func marshalCompactJSON(v any) string {
b, err := json.Marshal(v)
if err != nil {

View File

@@ -18,7 +18,7 @@ func normalizeClaudeRequest(store ConfigReader, req map[string]any) (claudeNorma
model, _ := req["model"].(string)
messagesRaw, _ := req["messages"].([]any)
if strings.TrimSpace(model) == "" || len(messagesRaw) == 0 {
return claudeNormalizedRequest{}, fmt.Errorf("Request must include 'model' and 'messages'.")
return claudeNormalizedRequest{}, fmt.Errorf("request must include 'model' and 'messages'")
}
if _, ok := req["max_tokens"]; !ok {
req["max_tokens"] = 8192

View File

@@ -1,6 +1,7 @@
package claude
import (
"ds2api/internal/toolcall"
"encoding/json"
"fmt"
"time"
@@ -46,9 +47,9 @@ func (s *claudeStreamRuntime) finalize(stopReason string) {
finalText := cleanVisibleOutput(s.text.String(), s.stripReferenceMarkers)
if s.bufferToolContent {
detected := util.ParseStandaloneToolCalls(finalText, s.toolNames)
detected := toolcall.ParseStandaloneToolCalls(finalText, s.toolNames)
if len(detected) == 0 && finalText == "" && finalThinking != "" {
detected = util.ParseStandaloneToolCalls(finalThinking, s.toolNames)
detected = toolcall.ParseStandaloneToolCalls(finalThinking, s.toolNames)
}
if len(detected) > 0 {
stopReason = "tool_use"

View File

@@ -5,6 +5,7 @@ import (
"strings"
)
//nolint:unused // compatibility hook for native Gemini request normalization path.
func collectGeminiPassThrough(req map[string]any) map[string]any {
cfg, _ := req["generationConfig"].(map[string]any)
if len(cfg) == 0 {

View File

@@ -9,6 +9,7 @@ import (
"ds2api/internal/util"
)
//nolint:unused // kept for native Gemini adapter route compatibility.
func normalizeGeminiRequest(store ConfigReader, routeModel string, req map[string]any, stream bool) (util.StandardRequest, error) {
requestedModel := strings.TrimSpace(routeModel)
if requestedModel == "" {
@@ -17,13 +18,13 @@ func normalizeGeminiRequest(store ConfigReader, routeModel string, req map[strin
resolvedModel, ok := config.ResolveModel(store, requestedModel)
if !ok {
return util.StandardRequest{}, fmt.Errorf("Model '%s' is not available.", requestedModel)
return util.StandardRequest{}, fmt.Errorf("model %q is not available", requestedModel)
}
thinkingEnabled, searchEnabled, _ := config.GetModelConfig(resolvedModel)
messagesRaw := geminiMessagesFromRequest(req)
if len(messagesRaw) == 0 {
return util.StandardRequest{}, fmt.Errorf("Request must include non-empty contents.")
return util.StandardRequest{}, fmt.Errorf("request must include non-empty contents")
}
toolsRaw := convertGeminiTools(req["tools"])

View File

@@ -2,6 +2,7 @@ package gemini
import "strings"
//nolint:unused // kept for native Gemini adapter route compatibility.
func convertGeminiTools(raw any) []any {
tools, _ := raw.([]any)
if len(tools) == 0 {

View File

@@ -2,6 +2,7 @@ package gemini
import (
"bytes"
"ds2api/internal/toolcall"
"encoding/json"
"io"
"net/http"
@@ -57,7 +58,7 @@ func (h *Handler) proxyViaOpenAI(w http.ResponseWriter, r *http.Request, stream
rec := httptest.NewRecorder()
h.OpenAI.ChatCompletions(rec, proxyReq)
res := rec.Result()
defer res.Body.Close()
defer func() { _ = res.Body.Close() }()
body, _ := io.ReadAll(res.Body)
for k, vv := range res.Header {
for _, v := range vv {
@@ -87,7 +88,7 @@ func (h *Handler) proxyViaOpenAI(w http.ResponseWriter, r *http.Request, stream
rec := httptest.NewRecorder()
h.OpenAI.ChatCompletions(rec, proxyReq)
res := rec.Result()
defer res.Body.Close()
defer func() { _ = res.Body.Close() }()
body, _ := io.ReadAll(res.Body)
if res.StatusCode < 200 || res.StatusCode >= 300 {
for k, vv := range res.Header {
@@ -131,8 +132,9 @@ func writeGeminiErrorFromOpenAI(w http.ResponseWriter, status int, raw []byte) {
writeGeminiError(w, status, message)
}
//nolint:unused // retained for native Gemini non-stream handling path.
func (h *Handler) handleNonStreamGenerateContent(w http.ResponseWriter, resp *http.Response, model, finalPrompt string, thinkingEnabled bool, toolNames []string) {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
writeGeminiError(w, resp.StatusCode, strings.TrimSpace(string(body)))
@@ -151,6 +153,7 @@ func (h *Handler) handleNonStreamGenerateContent(w http.ResponseWriter, resp *ht
))
}
//nolint:unused // retained for native Gemini non-stream handling path.
func buildGeminiGenerateContentResponse(model, finalPrompt, finalThinking, finalText string, toolNames []string, outputTokens int) map[string]any {
parts := buildGeminiPartsFromFinal(finalText, finalThinking, toolNames)
usage := buildGeminiUsage(finalPrompt, finalThinking, finalText, outputTokens)
@@ -170,6 +173,7 @@ func buildGeminiGenerateContentResponse(model, finalPrompt, finalThinking, final
}
}
//nolint:unused // retained for native Gemini non-stream handling path.
func buildGeminiUsage(finalPrompt, finalThinking, finalText string, outputTokens int) map[string]any {
promptTokens := util.EstimateTokens(finalPrompt)
reasoningTokens := util.EstimateTokens(finalThinking)
@@ -185,10 +189,11 @@ func buildGeminiUsage(finalPrompt, finalThinking, finalText string, outputTokens
}
}
//nolint:unused // retained for native Gemini non-stream handling path.
func buildGeminiPartsFromFinal(finalText, finalThinking string, toolNames []string) []map[string]any {
detected := util.ParseToolCalls(finalText, toolNames)
detected := toolcall.ParseToolCalls(finalText, toolNames)
if len(detected) == 0 && finalThinking != "" {
detected = util.ParseToolCalls(finalThinking, toolNames)
detected = toolcall.ParseToolCalls(finalThinking, toolNames)
}
if len(detected) > 0 {
parts := make([]map[string]any, 0, len(detected))

View File

@@ -17,6 +17,7 @@ type Handler struct {
OpenAI OpenAIChatRunner
}
//nolint:unused // used by native Gemini stream/non-stream runtime helpers.
func (h *Handler) compatStripReferenceMarkers() bool {
if h == nil || h.Store == nil {
return true

View File

@@ -12,8 +12,9 @@ import (
streamengine "ds2api/internal/stream"
)
//nolint:unused // retained for native Gemini stream handling path.
func (h *Handler) handleStreamGenerateContent(w http.ResponseWriter, r *http.Request, resp *http.Response, model, finalPrompt string, thinkingEnabled, searchEnabled bool, toolNames []string) {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
writeGeminiError(w, resp.StatusCode, strings.TrimSpace(string(body)))
@@ -49,6 +50,7 @@ func (h *Handler) handleStreamGenerateContent(w http.ResponseWriter, r *http.Req
})
}
//nolint:unused // retained for native Gemini stream handling path.
type geminiStreamRuntime struct {
w http.ResponseWriter
rc *http.ResponseController
@@ -68,6 +70,7 @@ type geminiStreamRuntime struct {
outputTokens int
}
//nolint:unused // retained for native Gemini stream handling path.
func newGeminiStreamRuntime(
w http.ResponseWriter,
rc *http.ResponseController,
@@ -93,6 +96,7 @@ func newGeminiStreamRuntime(
}
}
//nolint:unused // retained for native Gemini stream handling path.
func (s *geminiStreamRuntime) sendChunk(payload map[string]any) {
b, _ := json.Marshal(payload)
_, _ = s.w.Write([]byte("data: "))
@@ -103,6 +107,7 @@ func (s *geminiStreamRuntime) sendChunk(payload map[string]any) {
}
}
//nolint:unused // retained for native Gemini stream handling path.
func (s *geminiStreamRuntime) onParsed(parsed sse.LineResult) streamengine.ParsedDecision {
if !parsed.Parsed {
return streamengine.ParsedDecision{}
@@ -158,6 +163,7 @@ func (s *geminiStreamRuntime) onParsed(parsed sse.LineResult) streamengine.Parse
return streamengine.ParsedDecision{ContentSeen: contentSeen}
}
//nolint:unused // retained for native Gemini stream handling path.
func (s *geminiStreamRuntime) finalize() {
finalThinking := s.thinking.String()
finalText := cleanVisibleOutput(s.text.String(), s.stripReferenceMarkers)

View File

@@ -42,19 +42,23 @@ func (m testGeminiAuth) Determine(_ *http.Request) (*auth.RequestAuth, error) {
func (testGeminiAuth) Release(_ *auth.RequestAuth) {}
//nolint:unused // reserved test double for native Gemini DS-call path coverage.
type testGeminiDS struct {
resp *http.Response
err error
}
//nolint:unused // reserved test double for native Gemini DS-call path coverage.
func (m testGeminiDS) CreateSession(_ context.Context, _ *auth.RequestAuth, _ int) (string, error) {
return "session-id", nil
}
//nolint:unused // reserved test double for native Gemini DS-call path coverage.
func (m testGeminiDS) GetPow(_ context.Context, _ *auth.RequestAuth, _ int) (string, error) {
return "pow", nil
}
//nolint:unused // reserved test double for native Gemini DS-call path coverage.
func (m testGeminiDS) CallCompletion(_ context.Context, _ *auth.RequestAuth, _ map[string]any, _ string, _ int) (*http.Response, error) {
if m.err != nil {
return nil, m.err
@@ -100,6 +104,7 @@ func (s geminiOpenAISuccessStub) ChatCompletions(w http.ResponseWriter, _ *http.
_, _ = w.Write([]byte(out))
}
//nolint:unused // helper retained for native Gemini stream fixture tests.
func makeGeminiUpstreamResponse(lines ...string) *http.Response {
body := strings.Join(lines, "\n")
if !strings.HasSuffix(body, "\n") {

View File

@@ -2,6 +2,7 @@ package gemini
import textclean "ds2api/internal/textclean"
//nolint:unused // retained for native Gemini output post-processing path.
func cleanVisibleOutput(text string, stripReferenceMarkers bool) string {
if text == "" {
return text

View File

@@ -1,6 +1,7 @@
package openai
import (
"ds2api/internal/toolcall"
"encoding/json"
"net/http"
"strings"
@@ -8,7 +9,6 @@ import (
openaifmt "ds2api/internal/format/openai"
"ds2api/internal/sse"
streamengine "ds2api/internal/stream"
"ds2api/internal/util"
)
type chatStreamRuntime struct {
@@ -102,7 +102,7 @@ func (s *chatStreamRuntime) sendDone() {
func (s *chatStreamRuntime) finalize(finishReason string) {
finalThinking := s.thinking.String()
finalText := cleanVisibleOutput(s.text.String(), s.stripReferenceMarkers)
detected := util.ParseStandaloneToolCallsDetailed(finalText, s.toolNames)
detected := toolcall.ParseStandaloneToolCallsDetailed(finalText, s.toolNames)
if len(detected.Calls) > 0 && !s.toolCallsDoneEmitted {
finishReason = "tool_calls"
delta := map[string]any{

View File

@@ -116,7 +116,7 @@ func (h *Handler) autoDeleteRemoteSession(ctx context.Context, a *auth.RequestAu
func (h *Handler) handleNonStream(w http.ResponseWriter, ctx context.Context, resp *http.Response, completionID, model, finalPrompt string, thinkingEnabled bool, toolNames []string) {
if resp.StatusCode != http.StatusOK {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
body, _ := io.ReadAll(resp.Body)
writeOpenAIError(w, resp.StatusCode, string(body))
return
@@ -143,7 +143,7 @@ func (h *Handler) handleNonStream(w http.ResponseWriter, ctx context.Context, re
}
func (h *Handler) handleStream(w http.ResponseWriter, r *http.Request, resp *http.Response, completionID, model, finalPrompt string, thinkingEnabled, searchEnabled bool, toolNames []string) {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
writeOpenAIError(w, resp.StatusCode, string(body))

View File

@@ -107,7 +107,7 @@ type autoDeleteCtxDSStub struct {
}
func (m *autoDeleteCtxDSStub) DeleteSessionForToken(ctx context.Context, token string, sessionID string) (*deepseek.DeleteSessionResult, error) {
return m.autoDeleteModeDSStub.DeleteSessionForTokenCtx(ctx, token, sessionID)
return m.DeleteSessionForTokenCtx(ctx, token, sessionID)
}
func (m *autoDeleteCtxDSStub) DeleteAllSessionsForToken(_ context.Context, _ string) error {

View File

@@ -1,6 +1,7 @@
package openai
import (
"ds2api/internal/toolcall"
"encoding/json"
"fmt"
"strings"
@@ -75,7 +76,7 @@ func injectToolPrompt(messages []map[string]any, tools []any, policy util.ToolCh
// buildToolCallInstructions delegates to the shared util implementation.
func buildToolCallInstructions(toolNames []string) string {
return util.BuildToolCallInstructions(toolNames)
return toolcall.BuildToolCallInstructions(toolNames)
}
func formatIncrementalStreamToolCallDeltas(deltas []toolCallDelta, ids map[int]string) []map[string]any {
@@ -138,7 +139,7 @@ func filterIncrementalToolCallDeltasByAllowed(deltas []toolCallDelta, allowedNam
return out
}
func formatFinalStreamToolCallsWithStableIDs(calls []util.ParsedToolCall, ids map[int]string) []map[string]any {
func formatFinalStreamToolCallsWithStableIDs(calls []toolcall.ParsedToolCall, ids map[int]string) []map[string]any {
if len(calls) == 0 {
return nil
}

View File

@@ -1,6 +1,7 @@
package openai
import (
"ds2api/internal/toolcall"
"encoding/json"
"io"
"net/http"
@@ -106,7 +107,7 @@ func (h *Handler) Responses(w http.ResponseWriter, r *http.Request) {
}
func (h *Handler) handleResponsesNonStream(w http.ResponseWriter, resp *http.Response, owner, responseID, model, finalPrompt string, thinkingEnabled bool, toolNames []string, toolChoice util.ToolChoicePolicy, traceID string) {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
writeOpenAIError(w, resp.StatusCode, strings.TrimSpace(string(body)))
@@ -119,7 +120,7 @@ func (h *Handler) handleResponsesNonStream(w http.ResponseWriter, resp *http.Res
if writeUpstreamEmptyOutputError(w, sanitizedThinking, sanitizedText, result.ContentFilter) {
return
}
textParsed := util.ParseStandaloneToolCallsDetailed(sanitizedText, toolNames)
textParsed := toolcall.ParseStandaloneToolCallsDetailed(sanitizedText, toolNames)
logResponsesToolPolicyRejection(traceID, toolChoice, textParsed, "text")
callCount := len(textParsed.Calls)
@@ -142,7 +143,7 @@ func (h *Handler) handleResponsesNonStream(w http.ResponseWriter, resp *http.Res
}
func (h *Handler) handleResponsesStream(w http.ResponseWriter, r *http.Request, resp *http.Response, owner, responseID, model, finalPrompt string, thinkingEnabled, searchEnabled bool, toolNames []string, toolChoice util.ToolChoicePolicy, traceID string) {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
writeOpenAIError(w, resp.StatusCode, strings.TrimSpace(string(body)))
@@ -200,7 +201,7 @@ func (h *Handler) handleResponsesStream(w http.ResponseWriter, r *http.Request,
})
}
func logResponsesToolPolicyRejection(traceID string, policy util.ToolChoicePolicy, parsed util.ToolCallParseResult, channel string) {
func logResponsesToolPolicyRejection(traceID string, policy util.ToolChoicePolicy, parsed toolcall.ToolCallParseResult, channel string) {
rejected := filteredRejectedToolNamesForLog(parsed.RejectedToolNames)
if !parsed.RejectedByPolicy || len(rejected) == 0 {
return

View File

@@ -1,6 +1,7 @@
package openai
import (
"ds2api/internal/toolcall"
"net/http"
"strings"
@@ -107,7 +108,7 @@ func (s *responsesStreamRuntime) finalize() {
s.processToolStreamEvents(flushToolSieve(&s.sieve, s.toolNames), true)
}
textParsed := util.ParseStandaloneToolCallsDetailed(finalText, s.toolNames)
textParsed := toolcall.ParseStandaloneToolCallsDetailed(finalText, s.toolNames)
detected := textParsed.Calls
s.logToolPolicyRejections(textParsed)
@@ -163,8 +164,8 @@ func (s *responsesStreamRuntime) finalize() {
s.sendDone()
}
func (s *responsesStreamRuntime) logToolPolicyRejections(textParsed util.ToolCallParseResult) {
logRejected := func(parsed util.ToolCallParseResult, channel string) {
func (s *responsesStreamRuntime) logToolPolicyRejections(textParsed toolcall.ToolCallParseResult) {
logRejected := func(parsed toolcall.ToolCallParseResult, channel string) {
rejected := filteredRejectedToolNamesForLog(parsed.RejectedToolNames)
if !parsed.RejectedByPolicy || len(rejected) == 0 {
return

View File

@@ -1,11 +1,11 @@
package openai
import (
"ds2api/internal/toolcall"
"encoding/json"
"strings"
openaifmt "ds2api/internal/format/openai"
"ds2api/internal/util"
"github.com/google/uuid"
)
@@ -208,7 +208,7 @@ func (s *responsesStreamRuntime) emitFunctionCallDeltaEvents(deltas []toolCallDe
}
}
func (s *responsesStreamRuntime) emitFunctionCallDoneEvents(calls []util.ParsedToolCall) {
func (s *responsesStreamRuntime) emitFunctionCallDoneEvents(calls []toolcall.ParsedToolCall) {
for idx, tc := range calls {
if strings.TrimSpace(tc.Name) == "" {
continue

View File

@@ -1,12 +1,12 @@
package openai
import (
"ds2api/internal/toolcall"
"encoding/json"
"sort"
"strings"
openaifmt "ds2api/internal/format/openai"
"ds2api/internal/util"
)
func (s *responsesStreamRuntime) closeIncompleteFunctionItems() {
@@ -57,7 +57,7 @@ func (s *responsesStreamRuntime) closeIncompleteFunctionItems() {
}
}
func (s *responsesStreamRuntime) buildCompletedResponseObject(finalThinking, finalText string, calls []util.ParsedToolCall) map[string]any {
func (s *responsesStreamRuntime) buildCompletedResponseObject(finalThinking, finalText string, calls []toolcall.ParsedToolCall) map[string]any {
type indexedItem struct {
index int
item map[string]any

View File

@@ -12,11 +12,11 @@ func normalizeOpenAIChatRequest(store ConfigReader, req map[string]any, traceID
model, _ := req["model"].(string)
messagesRaw, _ := req["messages"].([]any)
if strings.TrimSpace(model) == "" || len(messagesRaw) == 0 {
return util.StandardRequest{}, fmt.Errorf("Request must include 'model' and 'messages'.")
return util.StandardRequest{}, fmt.Errorf("request must include 'model' and 'messages'")
}
resolvedModel, ok := config.ResolveModel(store, model)
if !ok {
return util.StandardRequest{}, fmt.Errorf("Model '%s' is not available.", model)
return util.StandardRequest{}, fmt.Errorf("model %q is not available", model)
}
thinkingEnabled, searchEnabled, _ := config.GetModelConfig(resolvedModel)
responseModel := strings.TrimSpace(model)
@@ -48,11 +48,11 @@ func normalizeOpenAIResponsesRequest(store ConfigReader, req map[string]any, tra
model, _ := req["model"].(string)
model = strings.TrimSpace(model)
if model == "" {
return util.StandardRequest{}, fmt.Errorf("Request must include 'model'.")
return util.StandardRequest{}, fmt.Errorf("request must include 'model'")
}
resolvedModel, ok := config.ResolveModel(store, model)
if !ok {
return util.StandardRequest{}, fmt.Errorf("Model '%s' is not available.", model)
return util.StandardRequest{}, fmt.Errorf("model %q is not available", model)
}
thinkingEnabled, searchEnabled, _ := config.GetModelConfig(resolvedModel)
@@ -68,7 +68,7 @@ func normalizeOpenAIResponsesRequest(store ConfigReader, req map[string]any, tra
messagesRaw = msgs
}
if len(messagesRaw) == 0 {
return util.StandardRequest{}, fmt.Errorf("Request must include 'input' or 'messages'.")
return util.StandardRequest{}, fmt.Errorf("request must include 'input' or 'messages'")
}
toolPolicy, err := parseToolChoicePolicy(req["tool_choice"], req["tools"])
if err != nil {
@@ -152,7 +152,7 @@ func parseToolChoicePolicy(toolChoiceRaw any, toolsRaw any) (util.ToolChoicePoli
case "required":
policy.Mode = util.ToolChoiceRequired
default:
return util.ToolChoicePolicy{}, fmt.Errorf("Unsupported tool_choice: %q", v)
return util.ToolChoicePolicy{}, fmt.Errorf("unsupported tool_choice: %q", v)
}
case map[string]any:
allowedOverride, hasAllowedOverride, err := parseAllowedToolNames(v["allowed_tools"])
@@ -198,7 +198,7 @@ func parseToolChoicePolicy(toolChoiceRaw any, toolsRaw any) (util.ToolChoicePoli
policy.ForcedName = name
policy.Allowed = namesToSet([]string{name})
default:
return util.ToolChoicePolicy{}, fmt.Errorf("Unsupported tool_choice.type: %q", typ)
return util.ToolChoicePolicy{}, fmt.Errorf("unsupported tool_choice.type: %q", typ)
}
default:
return util.ToolChoicePolicy{}, fmt.Errorf("tool_choice must be a string or object")
@@ -206,7 +206,7 @@ func parseToolChoicePolicy(toolChoiceRaw any, toolsRaw any) (util.ToolChoicePoli
if policy.Mode == util.ToolChoiceRequired || policy.Mode == util.ToolChoiceForced {
if len(declaredNames) == 0 {
return util.ToolChoicePolicy{}, fmt.Errorf("tool_choice=%s requires non-empty tools.", policy.Mode)
return util.ToolChoicePolicy{}, fmt.Errorf("tool_choice=%s requires non-empty tools", policy.Mode)
}
}
if policy.Mode == util.ToolChoiceForced {

View File

@@ -3,7 +3,7 @@ package openai
import (
"strings"
"ds2api/internal/util"
"ds2api/internal/toolcall"
)
func processToolSieveChunk(state *toolStreamSieveState, chunk string, toolNames []string) []toolStreamEvent {
@@ -226,7 +226,7 @@ func findToolSegmentStart(s string) int {
return start
}
func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix string, calls []util.ParsedToolCall, suffix string, ready bool) {
func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix string, calls []toolcall.ParsedToolCall, suffix string, ready bool) {
captured := state.capture.String()
if captured == "" {
return "", nil, "", false
@@ -267,7 +267,7 @@ func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix
}
prefixPart := captured[:start]
suffixPart := captured[end:]
parsed := util.ParseStandaloneToolCallsDetailed(obj, toolNames)
parsed := toolcall.ParseStandaloneToolCallsDetailed(obj, toolNames)
if len(parsed.Calls) == 0 {
if parsed.SawToolCallSyntax && parsed.RejectedByPolicy {
// Parsed as tool-call payload but rejected by schema/policy:

View File

@@ -1,9 +1,8 @@
package openai
import (
"ds2api/internal/toolcall"
"strings"
"ds2api/internal/util"
)
type toolStreamSieveState struct {
@@ -12,7 +11,7 @@ type toolStreamSieveState struct {
capturing bool
recentTextTail string
pendingToolRaw string
pendingToolCalls []util.ParsedToolCall
pendingToolCalls []toolcall.ParsedToolCall
disableDeltas bool
toolNameSent bool
toolName string
@@ -24,7 +23,7 @@ type toolStreamSieveState struct {
type toolStreamEvent struct {
Content string
ToolCalls []util.ParsedToolCall
ToolCalls []toolcall.ParsedToolCall
ToolCallDeltas []toolCallDelta
}

View File

@@ -1,14 +1,14 @@
package openai
import (
"ds2api/internal/toolcall"
"regexp"
"strings"
"ds2api/internal/util"
)
// --- XML tool call support for the streaming sieve ---
//nolint:unused // kept as explicit tag inventory for future XML sieve refinements.
var xmlToolCallClosingTags = []string{"</tool_calls>", "</tool_call>", "</invoke>", "</function_call>", "</function_calls>", "</tool_use>",
// Agent-style XML tags (Roo Code, Cline, etc.)
"</attempt_completion>", "</ask_followup_question>", "</new_task>", "</result>"}
@@ -34,6 +34,8 @@ var xmlToolCallTagPairs = []struct{ open, close string }{
}
// xmlToolCallBlockPattern matches a complete XML tool call block (wrapper or standalone).
//
//nolint:unused // reserved for future fast-path XML block detection.
var xmlToolCallBlockPattern = regexp.MustCompile(`(?is)(<tool_calls>\s*(?:.*?)\s*</tool_calls>|<tool_call>\s*(?:.*?)\s*</tool_call>|<invoke\b[^>]*>(?:.*?)</invoke>|<function_calls?\b[^>]*>(?:.*?)</function_calls?>|<tool_use>(?:.*?)</tool_use>|<attempt_completion>(?:.*?)</attempt_completion>|<ask_followup_question>(?:.*?)</ask_followup_question>|<new_task>(?:.*?)</new_task>)`)
// xmlToolTagsToDetect is the set of XML tag prefixes used by findToolSegmentStart.
@@ -43,7 +45,7 @@ var xmlToolTagsToDetect = []string{"<tool_calls>", "<tool_calls\n", "<tool_call>
"<attempt_completion>", "<ask_followup_question>", "<new_task>"}
// consumeXMLToolCapture tries to extract complete XML tool call blocks from captured text.
func consumeXMLToolCapture(captured string, toolNames []string) (prefix string, calls []util.ParsedToolCall, suffix string, ready bool) {
func consumeXMLToolCapture(captured string, toolNames []string) (prefix string, calls []toolcall.ParsedToolCall, suffix string, ready bool) {
lower := strings.ToLower(captured)
// Find the FIRST matching open/close pair, preferring wrapper tags.
// Tag pairs are ordered longest-first (e.g. <tool_calls before <tool_call)
@@ -66,7 +68,7 @@ func consumeXMLToolCapture(captured string, toolNames []string) (prefix string,
xmlBlock := captured[openIdx:closeEnd]
prefixPart := captured[:openIdx]
suffixPart := captured[closeEnd:]
parsed := util.ParseToolCalls(xmlBlock, toolNames)
parsed := toolcall.ParseToolCalls(xmlBlock, toolNames)
if len(parsed) > 0 {
prefixPart, suffixPart = trimWrappingJSONFence(prefixPart, suffixPart)
return prefixPart, parsed, suffixPart, true

View File

@@ -165,7 +165,7 @@ func (h *Handler) testAccount(ctx context.Context, acc config.Account, model, me
return result
}
if resp.StatusCode != http.StatusOK {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
result["message"] = fmt.Sprintf("请求失败: HTTP %d", resp.StatusCode)
return result
}
@@ -218,7 +218,7 @@ func (h *Handler) testAPI(w http.ResponseWriter, r *http.Request) {
writeJSON(w, http.StatusOK, map[string]any{"success": false, "error": err.Error()})
return
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode == http.StatusOK {
var parsed any

View File

@@ -85,7 +85,7 @@ func (h *Handler) addKey(w http.ResponseWriter, r *http.Request) {
err := h.Store.Update(func(c *config.Config) error {
for _, k := range c.Keys {
if k == key {
return fmt.Errorf("Key 已存在")
return fmt.Errorf("key 已存在")
}
}
c.Keys = append(c.Keys, key)
@@ -109,7 +109,7 @@ func (h *Handler) deleteKey(w http.ResponseWriter, r *http.Request) {
}
}
if idx < 0 {
return fmt.Errorf("Key 不存在")
return fmt.Errorf("key 不存在")
}
c.Keys = append(c.Keys[:idx], c.Keys[idx+1:]...)
return nil

View File

@@ -288,17 +288,17 @@ func TestQueryRawSampleCapturesGroupsBySessionAndMatchesQuestion(t *testing.T) {
func TestBuildCaptureChainsPreservesCaptureOrderWhenTimestampsCollide(t *testing.T) {
snapshot := []devcapture.Entry{
{
ID: "cap_continue",
CreatedAt: 1712365200,
Label: "deepseek_continue",
RequestBody: `{"chat_session_id":"session-collision","message_id":2}`,
ID: "cap_continue",
CreatedAt: 1712365200,
Label: "deepseek_continue",
RequestBody: `{"chat_session_id":"session-collision","message_id":2}`,
ResponseBody: "data: {\"v\":\"第二段\"}\n\n",
},
{
ID: "cap_completion",
CreatedAt: 1712365200,
Label: "deepseek_completion",
RequestBody: `{"chat_session_id":"session-collision","prompt":"题目"}`,
ID: "cap_completion",
CreatedAt: 1712365200,
Label: "deepseek_completion",
RequestBody: `{"chat_session_id":"session-collision","prompt":"题目"}`,
ResponseBody: "data: {\"v\":\"第一段\"}\n\n",
},
}

View File

@@ -301,7 +301,7 @@ func vercelRequest(ctx context.Context, client *http.Client, method, endpoint st
if err != nil {
return nil, 0, err
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
b, _ := io.ReadAll(resp.Body)
parsed := map[string]any{}
_ = json.Unmarshal(b, &parsed)

View File

@@ -43,7 +43,7 @@ func (h *Handler) getVersion(w http.ResponseWriter, _ *http.Request) {
writeJSON(w, http.StatusOK, resp)
return
}
defer r.Body.Close()
defer func() { _ = r.Body.Close() }()
if r.StatusCode < 200 || r.StatusCode >= 300 {
resp["check_error"] = "github api status: " + r.Status
writeJSON(w, http.StatusOK, resp)

View File

@@ -130,9 +130,7 @@ func TestMarkTokenInvalidNotConfigToken(t *testing.T) {
a := &RequestAuth{UseConfigToken: false, DeepSeekToken: "direct", resolver: r}
r.MarkTokenInvalid(a)
// Should not panic, token should be unchanged for non-config
if a.DeepSeekToken != "" {
// Actually it does clear it; that's fine - let's check behavior
}
_ = a.DeepSeekToken // Actual behavior may clear it; this test only asserts no panic.
}
func TestMarkTokenInvalidEmptyAccountID(t *testing.T) {

View File

@@ -1,6 +1,7 @@
package compat
import (
"ds2api/internal/toolcall"
"encoding/json"
"os"
"path/filepath"
@@ -86,22 +87,22 @@ func TestGoCompatToolcallFixtures(t *testing.T) {
mustLoadJSON(t, fixturePath, &fixture)
var expected struct {
Calls []util.ParsedToolCall `json:"calls"`
SawToolCallSyntax bool `json:"sawToolCallSyntax"`
RejectedByPolicy bool `json:"rejectedByPolicy"`
RejectedToolNames []string `json:"rejectedToolNames"`
Calls []toolcall.ParsedToolCall `json:"calls"`
SawToolCallSyntax bool `json:"sawToolCallSyntax"`
RejectedByPolicy bool `json:"rejectedByPolicy"`
RejectedToolNames []string `json:"rejectedToolNames"`
}
mustLoadJSON(t, expectedPath, &expected)
var got util.ToolCallParseResult
var got toolcall.ToolCallParseResult
switch strings.ToLower(strings.TrimSpace(fixture.Mode)) {
case "standalone":
got = util.ParseStandaloneToolCallsDetailed(fixture.Text, fixture.ToolNames)
got = toolcall.ParseStandaloneToolCallsDetailed(fixture.Text, fixture.ToolNames)
default:
got = util.ParseToolCallsDetailed(fixture.Text, fixture.ToolNames)
got = toolcall.ParseToolCallsDetailed(fixture.Text, fixture.ToolNames)
}
if got.Calls == nil {
got.Calls = []util.ParsedToolCall{}
got.Calls = []toolcall.ParsedToolCall{}
}
if got.RejectedToolNames == nil {
got.RejectedToolNames = []string{}

View File

@@ -58,8 +58,7 @@ func TestLoadStorePreservesFileBackedTokensForRuntime(t *testing.T) {
if err != nil {
t.Fatalf("create temp config: %v", err)
}
defer tmp.Close()
defer func() { _ = tmp.Close() }()
if _, err := tmp.WriteString(`{
"accounts":[{"email":"u@example.com","password":"p","token":"persisted-token"}]
}`); err != nil {
@@ -355,7 +354,7 @@ func TestAccountTestStatusIsRuntimeOnlyAndNotPersisted(t *testing.T) {
if err != nil {
t.Fatalf("create temp config: %v", err)
}
defer tmp.Close()
defer func() { _ = tmp.Close() }()
if _, err := tmp.WriteString(`{
"accounts":[{"email":"u@example.com","password":"p","test_status":"ok"}]
}`); err != nil {

View File

@@ -31,7 +31,7 @@ func TestCallContinuePropagatesPowHeaderToFallbackRequest(t *testing.T) {
var seenURL string
client := &Client{
stream: failingDoer{err: errors.New("stream transport failed")},
stream: failingDoer{err: errors.New("stream transport failed")},
fallbackS: &http.Client{
Transport: roundTripperFunc(func(req *http.Request) (*http.Response, error) {
seenPow = req.Header.Get("x-ds-pow-response")
@@ -54,8 +54,7 @@ func TestCallContinuePropagatesPowHeaderToFallbackRequest(t *testing.T) {
if err != nil {
t.Fatalf("callContinue returned error: %v", err)
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if seenPow != "pow-response-abc" {
t.Fatalf("continue request pow header=%q want=%q", seenPow, "pow-response-abc")
}
@@ -105,8 +104,7 @@ func TestCallCompletionAutoContinueThreadsPowHeader(t *testing.T) {
if err != nil {
t.Fatalf("CallCompletion returned error: %v", err)
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
out, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("read auto-continued body failed: %v", err)

View File

@@ -19,7 +19,7 @@ func readResponseBody(resp *http.Response) ([]byte, error) {
if err != nil {
return nil, err
}
defer gz.Close()
defer func() { _ = gz.Close() }()
reader = gz
case "br":
reader = brotli.NewReader(resp.Body)

View File

@@ -49,7 +49,7 @@ func (c *Client) postJSONWithStatus(ctx context.Context, doer trans.Doer, url st
return nil, 0, err
}
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
payloadBytes, err := readResponseBody(resp)
if err != nil {
return nil, resp.StatusCode, err
@@ -86,7 +86,7 @@ func (c *Client) getJSONWithStatus(ctx context.Context, doer trans.Doer, url str
return nil, 0, err
}
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
payloadBytes, err := readResponseBody(resp)
if err != nil {
return nil, resp.StatusCode, err

View File

@@ -6,13 +6,13 @@ import (
)
const (
DeepSeekHost = "chat.deepseek.com"
DeepSeekLoginURL = "https://chat.deepseek.com/api/v0/users/login"
DeepSeekCreateSessionURL = "https://chat.deepseek.com/api/v0/chat_session/create"
DeepSeekCreatePowURL = "https://chat.deepseek.com/api/v0/chat/create_pow_challenge"
DeepSeekCompletionURL = "https://chat.deepseek.com/api/v0/chat/completion"
DeepSeekContinueURL = "https://chat.deepseek.com/api/v0/chat/continue"
DeepSeekFetchSessionURL = "https://chat.deepseek.com/api/v0/chat_session/fetch_page"
DeepSeekHost = "chat.deepseek.com"
DeepSeekLoginURL = "https://chat.deepseek.com/api/v0/users/login"
DeepSeekCreateSessionURL = "https://chat.deepseek.com/api/v0/chat_session/create"
DeepSeekCreatePowURL = "https://chat.deepseek.com/api/v0/chat/create_pow_challenge"
DeepSeekCompletionURL = "https://chat.deepseek.com/api/v0/chat/completion"
DeepSeekContinueURL = "https://chat.deepseek.com/api/v0/chat/continue"
DeepSeekFetchSessionURL = "https://chat.deepseek.com/api/v0/chat_session/fetch_page"
DeepSeekDeleteSessionURL = "https://chat.deepseek.com/api/v0/chat_session/delete"
DeepSeekDeleteAllSessionsURL = "https://chat.deepseek.com/api/v0/chat_session/delete_all"
)

View File

@@ -1,6 +1,7 @@
package claude
import (
"ds2api/internal/toolcall"
"fmt"
"time"
@@ -8,9 +9,9 @@ import (
)
func BuildMessageResponse(messageID, model string, normalizedMessages []any, finalThinking, finalText string, toolNames []string) map[string]any {
detected := util.ParseToolCalls(finalText, toolNames)
detected := toolcall.ParseToolCalls(finalText, toolNames)
if len(detected) == 0 && finalText == "" && finalThinking != "" {
detected = util.ParseToolCalls(finalThinking, toolNames)
detected = toolcall.ParseToolCalls(finalThinking, toolNames)
}
content := make([]map[string]any, 0, 4)
if finalThinking != "" {

View File

@@ -1,14 +1,13 @@
package openai
import (
"ds2api/internal/toolcall"
"strings"
"time"
"ds2api/internal/util"
)
func BuildChatCompletion(completionID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
detected := util.ParseStandaloneToolCallsDetailed(finalText, toolNames)
detected := toolcall.ParseStandaloneToolCallsDetailed(finalText, toolNames)
finishReason := "stop"
messageObj := map[string]any{"role": "assistant", "content": finalText}
if strings.TrimSpace(finalThinking) != "" {
@@ -16,7 +15,7 @@ func BuildChatCompletion(completionID, model, finalPrompt, finalThinking, finalT
}
if len(detected.Calls) > 0 {
finishReason = "tool_calls"
messageObj["tool_calls"] = util.FormatOpenAIToolCalls(detected.Calls)
messageObj["tool_calls"] = toolcall.FormatOpenAIToolCalls(detected.Calls)
messageObj["content"] = nil
}

View File

@@ -1,19 +1,18 @@
package openai
import (
"ds2api/internal/toolcall"
"encoding/json"
"strings"
"time"
"github.com/google/uuid"
"ds2api/internal/util"
)
func BuildResponseObject(responseID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
// Strict mode: only standalone, structured tool-call payloads are treated
// as executable tool calls.
detected := util.ParseStandaloneToolCallsDetailed(finalText, toolNames)
detected := toolcall.ParseStandaloneToolCallsDetailed(finalText, toolNames)
exposedOutputText := finalText
output := make([]any, 0, 2)
if len(detected.Calls) > 0 {
@@ -71,7 +70,7 @@ func BuildResponseObjectFromItems(responseID, model, finalPrompt, finalThinking,
}
}
func toResponsesFunctionCallItems(toolCalls []util.ParsedToolCall) []any {
func toResponsesFunctionCallItems(toolCalls []toolcall.ParsedToolCall) []any {
if len(toolCalls) == 0 {
return nil
}

View File

@@ -71,7 +71,6 @@ func BuildResponsesTextDeltaPayload(responseID, itemID string, outputIndex, cont
}
}
func BuildResponsesTextDonePayload(responseID, itemID string, outputIndex, contentIndex int, text string) map[string]any {
return map[string]any{
"type": "response.output_text.done",

View File

@@ -20,6 +20,8 @@ function parseChunkForContent(chunk, thinkingEnabled, currentType, stripReferenc
};
}
const outputTokens = extractAccumulatedTokenUsage(chunk);
if (Object.prototype.hasOwnProperty.call(chunk, 'error')) {
return {
parsed: true,
@@ -33,7 +35,6 @@ function parseChunkForContent(chunk, thinkingEnabled, currentType, stripReferenc
}
const pathValue = asString(chunk.p);
const outputTokens = extractAccumulatedTokenUsage(chunk);
if (hasContentFilterStatus(chunk)) {
return {
@@ -465,10 +466,19 @@ function findAccumulatedTokenUsage(v) {
}
function toInt(v) {
if (typeof v !== 'number' || !Number.isFinite(v)) {
if (typeof v === 'number' && Number.isFinite(v)) {
return Math.trunc(v);
}
if (typeof v === 'string' && v.trim() !== '') {
const n = Number(v);
if (Number.isFinite(n)) {
return Math.trunc(n);
}
}
if (typeof v !== 'number') {
return 0;
}
return Math.trunc(v);
return Number.isFinite(v) ? Math.trunc(v) : 0;
}
function formatErrorMessage(v) {

View File

@@ -5,6 +5,7 @@ import (
"strings"
)
//nolint:unused // retained for raw-sample processing entrypoints.
func extractProcessedVisibleText(raw []byte, kind, contentType string) string {
if len(raw) == 0 {
return ""
@@ -22,6 +23,7 @@ func extractProcessedVisibleText(raw []byte, kind, contentType string) string {
return parseOpenAIStreamText(string(raw))
}
//nolint:unused // retained for raw-sample processing entrypoints.
func parseOpenAIStreamText(raw string) string {
if strings.TrimSpace(raw) == "" {
return ""
@@ -54,6 +56,7 @@ func parseOpenAIStreamText(raw string) string {
return out.String()
}
//nolint:unused // retained for raw-sample processing entrypoints.
func parseOpenAIJSONText(raw string) string {
if strings.TrimSpace(raw) == "" {
return ""
@@ -65,6 +68,7 @@ func parseOpenAIJSONText(raw string) string {
return extractOpenAIVisibleTextValue(decoded)
}
//nolint:unused // retained for raw-sample processing entrypoints.
func extractOpenAIVisibleTextValue(v any) string {
switch x := v.(type) {
case nil:

View File

@@ -24,7 +24,7 @@ type CollectResult struct {
// The caller is responsible for closing resp.Body unless closeBody is true.
func CollectStream(resp *http.Response, thinkingEnabled bool, closeBody bool) CollectResult {
if closeBody {
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
}
text := strings.Builder{}
thinking := strings.Builder{}

View File

@@ -20,8 +20,9 @@ func ParseDeepSeekContentLine(raw []byte, thinkingEnabled bool, currentType stri
if !parsed {
return LineResult{NextType: currentType}
}
outputTokens := extractAccumulatedTokenUsage(chunk)
if done {
return LineResult{Parsed: true, Stop: true, NextType: currentType}
return LineResult{Parsed: true, Stop: true, NextType: currentType, OutputTokens: outputTokens}
}
if errObj, hasErr := chunk["error"]; hasErr {
return LineResult{
@@ -29,6 +30,7 @@ func ParseDeepSeekContentLine(raw []byte, thinkingEnabled bool, currentType stri
Stop: true,
ErrorMessage: fmt.Sprintf("%v", errObj),
NextType: currentType,
OutputTokens: outputTokens,
}
}
if code, _ := chunk["code"].(string); code == "content_filter" {
@@ -37,7 +39,7 @@ func ParseDeepSeekContentLine(raw []byte, thinkingEnabled bool, currentType stri
Stop: true,
ContentFilter: true,
NextType: currentType,
OutputTokens: extractAccumulatedTokenUsage(chunk),
OutputTokens: outputTokens,
}
}
if hasContentFilterStatus(chunk) {
@@ -46,16 +48,16 @@ func ParseDeepSeekContentLine(raw []byte, thinkingEnabled bool, currentType stri
Stop: true,
ContentFilter: true,
NextType: currentType,
OutputTokens: extractAccumulatedTokenUsage(chunk),
OutputTokens: outputTokens,
}
}
parts, finished, nextType := ParseSSEChunkForContent(chunk, thinkingEnabled, currentType)
parts = filterLeakedContentFilterParts(parts)
return LineResult{
Parsed: true,
Stop: finished,
Parts: parts,
NextType: nextType,
OutputTokens: extractAccumulatedTokenUsage(chunk),
Parsed: true,
Stop: finished,
Parts: parts,
NextType: nextType,
OutputTokens: outputTokens,
}
}

View File

@@ -53,6 +53,23 @@ func TestParseDeepSeekContentLineCapturesAccumulatedTokenUsage(t *testing.T) {
}
}
func TestParseDeepSeekContentLineCapturesAccumulatedTokenUsageString(t *testing.T) {
res := ParseDeepSeekContentLine([]byte(`data: {"p":"response","o":"BATCH","v":[{"p":"accumulated_token_usage","v":"190"},{"p":"quasi_status","v":"FINISHED"}]}`), false, "text")
if res.OutputTokens != 190 {
t.Fatalf("expected output token usage 190, got %d", res.OutputTokens)
}
}
func TestParseDeepSeekContentLineErrorIncludesOutputTokens(t *testing.T) {
res := ParseDeepSeekContentLine([]byte(`data: {"error":"boom","accumulated_token_usage":123}`), false, "text")
if !res.Parsed || !res.Stop {
t.Fatalf("expected stop on error: %#v", res)
}
if res.OutputTokens != 123 {
t.Fatalf("expected output token usage 123 on error, got %d", res.OutputTokens)
}
}
func TestParseDeepSeekContentLineContent(t *testing.T) {
res := ParseDeepSeekContentLine([]byte(`data: {"p":"response/content","v":"hi"}`), false, "text")
if !res.Parsed || res.Stop {

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"math"
"strconv"
"strings"
"ds2api/internal/deepseek"
@@ -56,9 +57,7 @@ func isFragmentStatusPath(path string) bool {
if mid == "" {
return false
}
if strings.HasPrefix(mid, "-") {
mid = mid[1:]
}
mid = strings.TrimPrefix(mid, "-")
if mid == "" {
return false
}
@@ -309,11 +308,12 @@ func extractContentRecursive(items []any, defaultType string) ([]ContentPart, bo
}
typeName, _ := x["type"].(string)
typeName = strings.ToUpper(typeName)
if typeName == "THINK" || typeName == "THINKING" {
switch typeName {
case "THINK", "THINKING":
parts = append(parts, ContentPart{Text: ct, Type: "thinking"})
} else if typeName == "RESPONSE" {
case "RESPONSE":
parts = append(parts, ContentPart{Text: ct, Type: "text"})
} else {
default:
parts = append(parts, ContentPart{Text: ct, Type: partType})
}
case string:
@@ -413,6 +413,19 @@ func toInt(v any) (int, bool) {
return 0, false
}
return int(i), true
case string:
s := strings.TrimSpace(x)
if s == "" {
return 0, false
}
if i, err := strconv.Atoi(s); err == nil {
return i, true
}
f, err := strconv.ParseFloat(s, 64)
if err != nil || math.IsNaN(f) || math.IsInf(f, 0) {
return 0, false
}
return int(f), true
default:
return 0, false
}

View File

@@ -0,0 +1,134 @@
package sse
import (
	"bufio"
	"encoding/json"
	"errors"
	"math"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
)
// TestRawStreamSamplesTokenReplay replays every captured upstream SSE sample
// under tests/raw_stream_samples and checks that ParseDeepSeekContentLine
// recovers the same accumulated token usage that a raw JSON scan of the
// stream finds.
func TestRawStreamSamplesTokenReplay(t *testing.T) {
	root := filepath.Join("..", "..", "tests", "raw_stream_samples")
	entries, err := os.ReadDir(root)
	if err != nil {
		t.Fatalf("read samples root: %v", err)
	}
	found := 0
	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		// Only sample directories containing a captured SSE stream participate.
		ssePath := filepath.Join(root, entry.Name(), "upstream.stream.sse")
		if _, err := os.Stat(ssePath); err != nil {
			continue
		}
		found++
		t.Run(entry.Name(), func(t *testing.T) {
			raw, err := os.ReadFile(ssePath)
			if err != nil {
				t.Fatalf("read sample: %v", err)
			}
			parsedTokens, expectedTokens, err := replayAndCollectTokens(string(raw))
			if err != nil {
				t.Fatalf("replay token collection failed: %v", err)
			}
			// A sample with no token usage would make the comparison vacuous,
			// so treat it as a broken fixture.
			if expectedTokens <= 0 {
				t.Fatalf("expected positive token usage from raw stream, got %d", expectedTokens)
			}
			if parsedTokens != expectedTokens {
				t.Fatalf("token mismatch parsed=%d expected=%d", parsedTokens, expectedTokens)
			}
		})
	}
	// Guard against the fixture directory silently disappearing.
	if found == 0 {
		t.Fatalf("no upstream.stream.sse samples found under %s", root)
	}
}
// replayAndCollectTokens scans a raw SSE capture line by line and returns two
// token counts: parsedTokens, the last positive OutputTokens value reported by
// ParseDeepSeekContentLine, and expectedTokens, the last positive
// accumulated_token_usage value found by a direct JSON scan of each data
// payload. Malformed payloads are skipped rather than failing the replay.
func replayAndCollectTokens(raw string) (parsedTokens int, expectedTokens int, err error) {
	// DeepSeek streams open in the thinking phase; the parser threads the
	// current content type from line to line.
	currentType := "thinking"
	scanner := bufio.NewScanner(strings.NewReader(raw))
	// Raise the scanner limit to 2 MiB so large single-line payloads survive.
	scanner.Buffer(make([]byte, 0, 64*1024), 2*1024*1024)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if !strings.HasPrefix(line, "data:") {
			continue
		}
		payload := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
		// Skip terminators and anything that is not a JSON object.
		if payload == "" || payload == "[DONE]" || !strings.HasPrefix(payload, "{") {
			continue
		}
		var chunk map[string]any
		if err := json.Unmarshal([]byte(payload), &chunk); err != nil {
			continue
		}
		// Independent ground-truth scan of the same payload.
		if n := rawAccumulatedTokenUsage(chunk); n > 0 {
			expectedTokens = n
		}
		res := ParseDeepSeekContentLine([]byte(line), true, currentType)
		currentType = res.NextType
		if res.OutputTokens > 0 {
			parsedTokens = res.OutputTokens
		}
	}
	if scanErr := scanner.Err(); scanErr != nil {
		// Give a clearer message when a line exceeds the configured buffer.
		if errors.Is(scanErr, bufio.ErrTooLong) {
			return 0, 0, errors.New("raw stream line exceeds 2MiB scanner limit")
		}
		return 0, 0, scanErr
	}
	return parsedTokens, expectedTokens, nil
}
// rawAccumulatedTokenUsage walks an arbitrary decoded JSON value and returns
// the first positive accumulated_token_usage figure it encounters, whether it
// appears as a direct map key or as a {"p": "...accumulated_token_usage...",
// "v": n} path/value pair. Returns 0 when nothing matches.
func rawAccumulatedTokenUsage(v any) int {
	switch node := v.(type) {
	case []any:
		for _, elem := range node {
			if tokens := rawAccumulatedTokenUsage(elem); tokens > 0 {
				return tokens
			}
		}
	case map[string]any:
		// A direct key takes precedence over path-style encodings.
		if tokens := rawToInt(node["accumulated_token_usage"]); tokens > 0 {
			return tokens
		}
		path, _ := node["p"].(string)
		if strings.Contains(strings.ToLower(strings.TrimSpace(path)), "accumulated_token_usage") {
			if tokens := rawToInt(node["v"]); tokens > 0 {
				return tokens
			}
		}
		// Fall back to a depth-first scan of every nested value.
		for _, nested := range node {
			if tokens := rawAccumulatedTokenUsage(nested); tokens > 0 {
				return tokens
			}
		}
	}
	return 0
}
func rawToInt(v any) int {
switch x := v.(type) {
case float64:
return int(x)
case int:
return x
case string:
s := strings.TrimSpace(x)
if s == "" {
return 0
}
if n, err := strconv.Atoi(s); err == nil {
return n
}
if f, err := strconv.ParseFloat(s, 64); err == nil {
return int(f)
}
}
return 0
}

View File

@@ -103,7 +103,7 @@ func TestStartParsedLinePumpContextCancellation(t *testing.T) {
// Cancel context - this will cause the pump to exit on next send
cancel()
// Close the pipe to unblock scanner.Scan()
pw.Close()
_ = pw.Close()
// Drain remaining results
for range results {

View File

@@ -170,7 +170,7 @@ func (r *Runner) caseToolcallStreamMixed(ctx context.Context, cc *caseContext) e
cc.assert("tool_calls_delta_present", hasTool, "tool_calls delta missing")
cc.assert("no_raw_tool_json_leak", !rawLeak, "raw tool_calls leaked")
cc.assert("done_terminated", done, "expected [DONE]")
if !(hasTool && hasText) {
if !hasTool || !hasText {
r.warnings = append(r.warnings, "toolcall mixed stream did not produce both text and tool_calls in this run (model-side behavior dependent)")
}
return nil

View File

@@ -58,7 +58,7 @@ func (cc *caseContext) abortStreamRequest(ctx context.Context, spec requestSpec)
})
return err
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
buf := make([]byte, 512)
_, _ = resp.Body.Read(buf)
_ = resp.Body.Close()

View File

@@ -66,7 +66,7 @@ func (r *Runner) pruneOldRuns() error {
if err := os.RemoveAll(dirPath); err != nil {
errs = append(errs, fmt.Sprintf("remove %s: %v", name, err))
} else {
fmt.Fprintf(os.Stdout, "pruned old test run: %s\n", name)
_, _ = fmt.Fprintf(os.Stdout, "pruned old test run: %s\n", name)
}
}
@@ -82,7 +82,7 @@ func (r *Runner) runPreflight(ctx context.Context) error {
if err != nil {
return err
}
defer f.Close()
defer func() { _ = f.Close() }()
for _, step := range steps {
if _, err := fmt.Fprintf(f, "\n$ %s\n", strings.Join(step, " ")); err != nil {
return err
@@ -218,7 +218,7 @@ func (r *Runner) ping(path string) error {
if err != nil {
return err
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("status=%d", resp.StatusCode)
}

View File

@@ -115,7 +115,7 @@ func (cc *caseContext) requestOnce(ctx context.Context, spec requestSpec, attemp
cc.mu.Unlock()
return nil, err
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
body, _ := io.ReadAll(resp.Body)
cc.mu.Lock()
@@ -131,7 +131,7 @@ func (cc *caseContext) requestOnce(ctx context.Context, spec requestSpec, attemp
})
if spec.Stream {
cc.streamRaw.WriteString(fmt.Sprintf("### trace=%s url=%s\n", traceID, fullURL))
_, _ = fmt.Fprintf(&cc.streamRaw, "### trace=%s url=%s\n", traceID, fullURL)
cc.streamRaw.Write(body)
cc.streamRaw.WriteString("\n\n")
}

View File

@@ -126,7 +126,7 @@ func findFreePort() (int, error) {
if err != nil {
return 0, err
}
defer ln.Close()
defer func() { _ = ln.Close() }()
addr, ok := ln.Addr().(*net.TCPAddr)
if !ok {
return 0, errors.New("failed to detect tcp port")

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import "strings"

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"strings"

View File

@@ -0,0 +1,92 @@
package toolcall
import (
"testing"
)
// ─── FormatOpenAIStreamToolCalls ─────────────────────────────────────
// TestFormatOpenAIStreamToolCalls checks that a single parsed call is
// formatted as one OpenAI streaming tool_calls entry carrying the function
// name and a zero-based index.
func TestFormatOpenAIStreamToolCalls(t *testing.T) {
	formatted := FormatOpenAIStreamToolCalls([]ParsedToolCall{
		{Name: "search", Input: map[string]any{"q": "test"}},
	})
	if len(formatted) != 1 {
		t.Fatalf("expected 1, got %d", len(formatted))
	}
	// The streaming shape nests the name under a "function" object.
	fn, _ := formatted[0]["function"].(map[string]any)
	if fn["name"] != "search" {
		t.Fatalf("unexpected function name: %#v", fn)
	}
	// Streaming deltas are positional; the first call must carry index 0.
	if formatted[0]["index"] != 0 {
		t.Fatalf("expected index 0, got %v", formatted[0]["index"])
	}
}
// ─── ParseToolCalls more edge cases ──────────────────────────────────
// TestParseToolCallsNoToolNames verifies that a nil allow-list does not
// filter anything: the call is still parsed and returned.
func TestParseToolCallsNoToolNames(t *testing.T) {
	text := `{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
	calls := ParseToolCalls(text, nil)
	if len(calls) != 1 {
		t.Fatalf("expected 1 call with nil tool names, got %d", len(calls))
	}
}
// TestParseToolCallsEmptyText verifies that parsing empty input yields no
// tool calls.
func TestParseToolCallsEmptyText(t *testing.T) {
	got := ParseToolCalls("", []string{"search"})
	if n := len(got); n != 0 {
		t.Fatalf("expected 0 calls for empty text, got %d", n)
	}
}
// TestParseToolCallsMultipleTools checks that a payload declaring two calls
// against a matching allow-list yields both calls.
func TestParseToolCallsMultipleTools(t *testing.T) {
	text := `{"tool_calls":[{"name":"search","input":{"q":"go"}},{"name":"get_weather","input":{"city":"beijing"}}]}`
	calls := ParseToolCalls(text, []string{"search", "get_weather"})
	if len(calls) != 2 {
		t.Fatalf("expected 2 calls, got %d", len(calls))
	}
}
// TestParseToolCallsInputAsString verifies that an "input" field given as a
// JSON-encoded string (rather than an object) is decoded into a map.
func TestParseToolCallsInputAsString(t *testing.T) {
	text := `{"tool_calls":[{"name":"search","input":"{\"q\":\"golang\"}"}]}`
	calls := ParseToolCalls(text, []string{"search"})
	if len(calls) != 1 {
		t.Fatalf("expected 1 call, got %d", len(calls))
	}
	// The stringified input must round-trip into structured arguments.
	if calls[0].Input["q"] != "golang" {
		t.Fatalf("expected parsed string input, got %#v", calls[0].Input)
	}
}
// TestParseToolCallsWithFunctionWrapper verifies that the OpenAI-style
// {"function":{"name":...,"arguments":...}} wrapper is unwrapped into a
// parsed call with the inner name.
func TestParseToolCallsWithFunctionWrapper(t *testing.T) {
	text := `{"tool_calls":[{"function":{"name":"calc","arguments":{"x":1,"y":2}}}]}`
	calls := ParseToolCalls(text, []string{"calc"})
	if len(calls) != 1 {
		t.Fatalf("expected 1 call, got %d", len(calls))
	}
	if calls[0].Name != "calc" {
		t.Fatalf("expected calc, got %q", calls[0].Name)
	}
}
// TestParseStandaloneToolCallsFencedCodeBlock checks that a tool-call payload
// presented inside a fenced code block (i.e. shown as an example, not a real
// invocation) is NOT treated as an executable call in standalone mode.
func TestParseStandaloneToolCallsFencedCodeBlock(t *testing.T) {
	fenced := "Here's an example:\n```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}\n```\nDon't execute this."
	calls := ParseStandaloneToolCalls(fenced, []string{"search"})
	if len(calls) != 0 {
		t.Fatalf("expected fenced code block to be ignored, got %d calls", len(calls))
	}
}
// ─── looksLikeToolExampleContext ─────────────────────────────────────
// TestLooksLikeToolExampleContextNone verifies that ordinary prose preceding
// a tool call is not misclassified as example/documentation context.
func TestLooksLikeToolExampleContextNone(t *testing.T) {
	if looksLikeToolExampleContext("I will call the tool now") {
		t.Fatal("expected false for non-example context")
	}
}
// TestLooksLikeToolExampleContextFenced verifies that an opening code fence
// marks the surrounding text as example context.
func TestLooksLikeToolExampleContextFenced(t *testing.T) {
	if !looksLikeToolExampleContext("```json") {
		t.Fatal("expected true for fenced code block context")
	}
}

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"regexp"
@@ -8,6 +8,8 @@ import (
var toolCallPattern = regexp.MustCompile(`\{\s*["']tool_calls["']\s*:\s*\[(.*?)\]\s*\}`)
var fencedJSONPattern = regexp.MustCompile("(?s)```(?:json)?\\s*(.*?)\\s*```")
var fencedCodeBlockPattern = regexp.MustCompile("(?s)```[\\s\\S]*?```")
//nolint:unused // retained for future markup tool-call heuristics.
var markupToolSyntaxPattern = regexp.MustCompile(`(?i)<(?:(?:[a-z0-9_:-]+:)?(?:tool_call|function_call|invoke)\b|(?:[a-z0-9_:-]+:)?function_calls\b|(?:[a-z0-9_:-]+:)?tool_use\b)`)
func buildToolCallCandidates(text string) []string {
@@ -190,6 +192,7 @@ func shouldSkipToolCallParsingForCodeFenceExample(text string) bool {
return !looksLikeToolCallSyntax(stripped)
}
//nolint:unused // retained for future markup tool-call heuristics.
func looksLikeMarkupToolSyntax(text string) bool {
return markupToolSyntaxPattern.MatchString(text)
}

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"encoding/json"

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"encoding/json"

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"regexp"
@@ -27,7 +27,7 @@ func repairInvalidJSONBackslashes(s string) string {
isHex := true
for j := 1; j <= 4; j++ {
r := runes[i+1+j]
if !((r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')) {
if (r < '0' || r > '9') && (r < 'a' || r > 'f') && (r < 'A' || r > 'F') {
isHex = false
break
}

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"encoding/json"

View File

@@ -1,12 +1,14 @@
package util
package toolcall
import (
"regexp"
"strings"
)
//nolint:unused // retained for policy-level tool-name matching compatibility.
var toolNameLoosePattern = regexp.MustCompile(`[^a-z0-9]+`)
//nolint:unused // retained for policy-level tool-name matching compatibility.
func resolveAllowedToolNameWithLooseMatch(name string, allowed map[string]struct{}, allowedCanonical map[string]string) string {
if _, ok := allowed[name]; ok {
return name

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"encoding/json"
@@ -164,6 +164,7 @@ func filterToolCallsDetailed(parsed []ParsedToolCall, availableToolNames []strin
return out, nil
}
//nolint:unused // retained for policy-level tool-name matching compatibility.
func resolveAllowedToolName(name string, allowed map[string]struct{}, allowedCanonical map[string]string) string {
return resolveAllowedToolNameWithLooseMatch(name, allowed, allowedCanonical)
}

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import "strings"
@@ -7,7 +7,7 @@ func isLikelyJSONToolPayloadCandidate(candidate string) bool {
if trimmed == "" {
return false
}
if !(strings.HasPrefix(trimmed, "{") || strings.HasPrefix(trimmed, "[")) {
if !strings.HasPrefix(trimmed, "{") && !strings.HasPrefix(trimmed, "[") {
return false
}
lower := strings.ToLower(trimmed)
@@ -73,7 +73,6 @@ func parseToolCallItem(m map[string]any) (ParsedToolCall, bool) {
for _, key := range []string{"arguments", "args", "parameters", "params"} {
if v, ok := m[key]; ok {
inputRaw = v
hasInput = true
break
}
}

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"encoding/json"

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"strings"

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"regexp"

View File

@@ -1,4 +1,4 @@
package util
package toolcall
import (
"testing"

View File

@@ -1,6 +1,7 @@
package util
import (
"ds2api/internal/toolcall"
"fmt"
"strings"
"time"
@@ -11,7 +12,7 @@ import (
// BuildOpenAIChatCompletion is kept for backward compatibility.
// Prefer internal/format/openai.BuildChatCompletion for new code.
func BuildOpenAIChatCompletion(completionID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
detected := ParseToolCalls(finalText, toolNames)
detected := toolcall.ParseToolCalls(finalText, toolNames)
finishReason := "stop"
messageObj := map[string]any{"role": "assistant", "content": finalText}
if strings.TrimSpace(finalThinking) != "" {
@@ -19,7 +20,7 @@ func BuildOpenAIChatCompletion(completionID, model, finalPrompt, finalThinking,
}
if len(detected) > 0 {
finishReason = "tool_calls"
messageObj["tool_calls"] = FormatOpenAIToolCalls(detected)
messageObj["tool_calls"] = toolcall.FormatOpenAIToolCalls(detected)
messageObj["content"] = nil
}
promptTokens := EstimateTokens(finalPrompt)
@@ -46,7 +47,7 @@ func BuildOpenAIChatCompletion(completionID, model, finalPrompt, finalThinking,
// BuildOpenAIResponseObject is kept for backward compatibility.
// Prefer internal/format/openai.BuildResponseObject for new code.
func BuildOpenAIResponseObject(responseID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
detected := ParseToolCalls(finalText, toolNames)
detected := toolcall.ParseToolCalls(finalText, toolNames)
exposedOutputText := finalText
output := make([]any, 0, 2)
if len(detected) > 0 {
@@ -108,7 +109,7 @@ func BuildOpenAIResponseObject(responseID, model, finalPrompt, finalThinking, fi
// BuildClaudeMessageResponse is kept for backward compatibility.
// Prefer internal/format/claude.BuildMessageResponse for new code.
func BuildClaudeMessageResponse(messageID, model string, normalizedMessages []any, finalThinking, finalText string, toolNames []string) map[string]any {
detected := ParseToolCalls(finalText, toolNames)
detected := toolcall.ParseToolCalls(finalText, toolNames)
content := make([]map[string]any, 0, 4)
if finalThinking != "" {
content = append(content, map[string]any{"type": "thinking", "thinking": finalThinking})

View File

@@ -355,90 +355,3 @@ func TestConvertClaudeToDeepSeekOpusUsesSlowMapping(t *testing.T) {
t.Fatalf("expected opus to use slow mapping, got %q", out["model"])
}
}
// ─── FormatOpenAIStreamToolCalls ─────────────────────────────────────
func TestFormatOpenAIStreamToolCalls(t *testing.T) {
formatted := FormatOpenAIStreamToolCalls([]ParsedToolCall{
{Name: "search", Input: map[string]any{"q": "test"}},
})
if len(formatted) != 1 {
t.Fatalf("expected 1, got %d", len(formatted))
}
fn, _ := formatted[0]["function"].(map[string]any)
if fn["name"] != "search" {
t.Fatalf("unexpected function name: %#v", fn)
}
if formatted[0]["index"] != 0 {
t.Fatalf("expected index 0, got %v", formatted[0]["index"])
}
}
// ─── ParseToolCalls more edge cases ──────────────────────────────────
// TestParseToolCallsNoToolNames confirms parsing still works when no
// allow-list of tool names is supplied.
func TestParseToolCallsNoToolNames(t *testing.T) {
	const text = `{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
	if got := ParseToolCalls(text, nil); len(got) != 1 {
		t.Fatalf("expected 1 call with nil tool names, got %d", len(got))
	}
}
// TestParseToolCallsEmptyText confirms empty input yields no tool calls.
func TestParseToolCallsEmptyText(t *testing.T) {
	if got := ParseToolCalls("", []string{"search"}); len(got) != 0 {
		t.Fatalf("expected 0 calls for empty text, got %d", len(got))
	}
}
// TestParseToolCallsMultipleTools confirms several tool calls in one payload
// are all extracted.
func TestParseToolCallsMultipleTools(t *testing.T) {
	const text = `{"tool_calls":[{"name":"search","input":{"q":"go"}},{"name":"get_weather","input":{"city":"beijing"}}]}`
	got := ParseToolCalls(text, []string{"search", "get_weather"})
	if len(got) != 2 {
		t.Fatalf("expected 2 calls, got %d", len(got))
	}
}
// TestParseToolCallsInputAsString confirms a JSON-encoded string input is
// decoded into a map rather than kept as raw text.
func TestParseToolCallsInputAsString(t *testing.T) {
	const text = `{"tool_calls":[{"name":"search","input":"{\"q\":\"golang\"}"}]}`
	got := ParseToolCalls(text, []string{"search"})
	if len(got) != 1 {
		t.Fatalf("expected 1 call, got %d", len(got))
	}
	if got[0].Input["q"] != "golang" {
		t.Fatalf("expected parsed string input, got %#v", got[0].Input)
	}
}
// TestParseToolCallsWithFunctionWrapper confirms the OpenAI-style
// {"function":{...}} wrapper is unwrapped into name and arguments.
func TestParseToolCallsWithFunctionWrapper(t *testing.T) {
	const text = `{"tool_calls":[{"function":{"name":"calc","arguments":{"x":1,"y":2}}}]}`
	got := ParseToolCalls(text, []string{"calc"})
	if len(got) != 1 {
		t.Fatalf("expected 1 call, got %d", len(got))
	}
	if got[0].Name != "calc" {
		t.Fatalf("expected calc, got %q", got[0].Name)
	}
}
// TestParseStandaloneToolCallsFencedCodeBlock ensures tool-call JSON shown
// inside a fenced code block is treated as documentation, not executed.
func TestParseStandaloneToolCallsFencedCodeBlock(t *testing.T) {
	fenced := "Here's an example:\n```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}\n```\nDon't execute this."
	if got := ParseStandaloneToolCalls(fenced, []string{"search"}); len(got) != 0 {
		t.Fatalf("expected fenced code block to be ignored, got %d calls", len(got))
	}
}
// ─── looksLikeToolExampleContext ─────────────────────────────────────
// TestLooksLikeToolExampleContextNone checks that plain narration is not
// flagged as a tool-example context.
func TestLooksLikeToolExampleContextNone(t *testing.T) {
	if got := looksLikeToolExampleContext("I will call the tool now"); got {
		t.Fatal("expected false for non-example context")
	}
}

// TestLooksLikeToolExampleContextFenced checks that a fenced code-block
// opener is flagged as a tool-example context.
func TestLooksLikeToolExampleContextFenced(t *testing.T) {
	if got := looksLikeToolExampleContext("```json"); !got {
		t.Fatal("expected true for fenced code block context")
	}
}

View File

@@ -56,9 +56,9 @@ internal/adapter/openai/tool_sieve_core.go
internal/adapter/openai/tool_sieve_xml.go
internal/adapter/openai/tool_sieve_jsonscan.go
internal/util/toolcalls_parse.go
internal/util/toolcalls_candidates.go
internal/util/toolcalls_format.go
internal/toolcall/toolcalls_parse.go
internal/toolcall/toolcalls_candidates.go
internal/toolcall/toolcalls_format.go
internal/adapter/claude/handler_routes.go
internal/adapter/claude/handler_messages.go

105
scripts/lint.sh Executable file
View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# Lint entrypoint: runs golangci-lint (fmt + run) and, when the locally
# installed binary is missing or incompatible, bootstraps a pinned release.
set -euo pipefail
# Resolve the repository root so the script works from any working directory.
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
cd "$ROOT_DIR"
# Binary and bootstrap version may be overridden via environment variables.
LINT_BIN="${GOLANGCI_LINT_BIN:-golangci-lint}"
BOOTSTRAP_VERSION="${GOLANGCI_LINT_VERSION:-v2.11.4}"
# Version-suffixed cache path so a version bump triggers a fresh bootstrap.
BOOTSTRAP_BIN="${ROOT_DIR}/.tmp/golangci-lint-${BOOTSTRAP_VERSION}"
bootstrap_golangci_lint() {
	# Download the pinned golangci-lint release archive for this host and
	# install its binary at ${BOOTSTRAP_BIN}. Returns non-zero on unsupported
	# platforms or download/extract failure.
	local version_no_v os arch ext bin_name artifact archive_url tmp_dir
	version_no_v="${BOOTSTRAP_VERSION#v}"
	os="$(uname -s | tr '[:upper:]' '[:lower:]')"
	arch="$(uname -m | tr '[:upper:]' '[:lower:]')"
	ext="tar.gz"
	bin_name="golangci-lint"
	case "$os" in
		linux|darwin) ;;
		windows|mingw*|msys*|cygwin*)
			# `uname -s` on Windows shells reports MINGW*/MSYS*/CYGWIN*, never
			# plain "windows", and upstream ships Windows builds only as .zip
			# archives containing golangci-lint.exe. The previous tar.gz path
			# could never succeed on Windows; normalize and switch formats.
			os="windows"
			ext="zip"
			bin_name="golangci-lint.exe"
			;;
		*)
			echo "unsupported OS for bootstrap: ${os}" >&2
			return 1
			;;
	esac
	case "$arch" in
		x86_64|amd64) arch="amd64" ;;
		aarch64|arm64) arch="arm64" ;;
		*)
			echo "unsupported architecture for bootstrap: ${arch}" >&2
			return 1
			;;
	esac
	artifact="${os}-${arch}"
	archive_url="https://github.com/golangci/golangci-lint/releases/download/${BOOTSTRAP_VERSION}/golangci-lint-${version_no_v}-${artifact}.${ext}"
	mkdir -p "${ROOT_DIR}/.tmp"
	tmp_dir="$(mktemp -d)"
	# Clean up the scratch directory whenever this function returns.
	trap 'rm -rf "${tmp_dir}"' RETURN
	curl -sSfL "${archive_url}" -o "${tmp_dir}/golangci-lint.${ext}"
	if [[ "$ext" == "zip" ]]; then
		unzip -q "${tmp_dir}/golangci-lint.${ext}" -d "${tmp_dir}"
	else
		tar -xzf "${tmp_dir}/golangci-lint.${ext}" -C "${tmp_dir}"
	fi
	cp "${tmp_dir}/golangci-lint-${version_no_v}-${artifact}/${bin_name}" "${BOOTSTRAP_BIN}"
	chmod +x "${BOOTSTRAP_BIN}"
	echo "bootstrapped golangci-lint ${BOOTSTRAP_VERSION} to ${BOOTSTRAP_BIN}" >&2
}
run_lint() {
	# Run the v2 formatter check followed by the linter using one binary spec.
	local bin="$1"
	if [[ "$bin" != *" "* ]]; then
		"$bin" fmt --diff -c .golangci.yml && "$bin" run -c .golangci.yml
		return
	fi
	# Multi-word specs (e.g. "go tool golangci-lint") need word splitting.
	eval "$bin fmt --diff -c .golangci.yml" && eval "$bin run -c .golangci.yml"
}
is_compatibility_error() {
	# Return 0 when lint output indicates a missing or incompatible
	# golangci-lint installation (as opposed to genuine lint findings).
	local output="$1" needle
	local needles=(
		'command not found'
		'not recognized as an internal or external command'
		'No such file or directory'
		'unknown command "fmt"'
		'unknown command "run"'
		'unknown flag'
		'no such flag'
		'unsupported version of the configuration'
		"can't load config"
	)
	for needle in "${needles[@]}"; do
		if [[ "$output" == *"$needle"* ]]; then
			return 0
		fi
	done
	return 1
}
# v2 separates formatters from linters; enforce both in one entrypoint.
# First attempt: the binary on PATH (or the one named by GOLANGCI_LINT_BIN).
if lint_output="$(run_lint "$LINT_BIN" 2>&1)"; then
[[ -n "$lint_output" ]] && echo "$lint_output"
exit 0
fi
# An explicitly chosen binary is never replaced; surface its failure as-is.
if [[ -n "${GOLANGCI_LINT_BIN:-}" ]]; then
echo "$lint_output" >&2
echo "lint failed with explicit GOLANGCI_LINT_BIN=${GOLANGCI_LINT_BIN}; skip auto-bootstrap." >&2
exit 1
fi
# Genuine lint findings (not a tooling problem): fail without bootstrapping.
if ! is_compatibility_error "$lint_output"; then
echo "$lint_output" >&2
exit 1
fi
echo "default golangci-lint appears incompatible; bootstrapping ${BOOTSTRAP_VERSION}..." >&2
# Reuse a previously bootstrapped binary when one is already cached.
if [[ ! -x "${BOOTSTRAP_BIN}" ]]; then
bootstrap_golangci_lint
fi
# Second attempt with the pinned bootstrap binary; its result is final.
if lint_output="$(run_lint "${BOOTSTRAP_BIN}" 2>&1)"; then
[[ -n "$lint_output" ]] && echo "$lint_output"
exit 0
fi
echo "$lint_output" >&2
exit 1

View File

@@ -76,6 +76,23 @@ POST /admin/dev/raw-samples/save
./tests/scripts/run-raw-stream-sim.sh
```
运行**全部样本目录**(不只 manifest 默认样本),并逐个打印 token 对齐结果:
```bash
for d in tests/raw_stream_samples/*; do
[ -d "$d" ] || continue
sid="$(basename "$d")"
[ -f "$d/upstream.stream.sse" ] || continue
node tests/tools/deepseek-sse-simulator.mjs --samples-root tests/raw_stream_samples --sample-id "$sid"
done
```
回放输出会显示 `tokens=<parsed>/<expected>`,并在不一致时判定失败;`report.json` 中也会包含:
- `raw_expected_output_tokens`
- `raw_parsed_output_tokens`
- `raw_token_mismatch`
运行单个样本并和已有基线比对:
```bash

View File

@@ -27,7 +27,7 @@ func repairInvalidJSONBackslashes(s string) string {
isHex := true
for j := 1; j <= 4; j++ {
r := runes[i+1+j]
if !((r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')) {
if (r < '0' || r > '9') && (r < 'a' || r > 'f') && (r < 'A' || r > 'F') {
isHex = false
break
}

View File

@@ -17,6 +17,10 @@ trap cleanup EXIT
if ! node --test --test-concurrency=1 tests/node/stream-tool-sieve.test.js tests/node/chat-stream.test.js tests/node/js_compat_test.js "$@" 2>&1 | tee "$NODE_TEST_LOG"; then
echo
echo "[run-unit-node] Node tests failed. 失败摘要如下:"
rg -n "^(not ok|# fail)|ERR_TEST_FAILURE" "$NODE_TEST_LOG" || true
if command -v rg >/dev/null 2>&1; then
rg -n "^(not ok|# fail)|ERR_TEST_FAILURE" "$NODE_TEST_LOG" || true
else
grep -nE "^(not ok|# fail)|ERR_TEST_FAILURE" "$NODE_TEST_LOG" || true
fi
exit 1
fi

View File

@@ -20,6 +20,7 @@ function parseArgs(argv) {
failOnReferenceLeak: true,
failOnMissingFinish: true,
failOnBaselineMismatch: true,
failOnTokenMismatch: true,
showOutput: false,
writeReplayText: false,
};
@@ -43,6 +44,8 @@ function parseArgs(argv) {
out.failOnMissingFinish = false;
} else if (a === '--no-fail-on-baseline-mismatch' || a === '--no-fail-on-processed-mismatch') {
out.failOnBaselineMismatch = false;
} else if (a === '--no-fail-on-token-mismatch') {
out.failOnTokenMismatch = false;
} else if (a === '--show-output') {
out.showOutput = true;
} else if (a === '--write-replay-text' || a === '--write-processed-text') {
@@ -183,6 +186,8 @@ function parseDeepSeekReplay(raw) {
let thinkingText = '';
let textOutput = '';
let parsedChunks = 0;
let parsedOutputTokens = 0;
let expectedOutputTokens = 0;
for (const evt of events) {
if (evt.event === 'finish') {
@@ -198,7 +203,14 @@ function parseDeepSeekReplay(raw) {
continue;
}
parsedChunks += 1;
const expected = extractAccumulatedTokenUsageFromRawChunk(obj);
if (expected > 0) {
expectedOutputTokens = expected;
}
const parsed = parseChunkForContent(obj, true, currentType);
if (parsed.outputTokens > 0) {
parsedOutputTokens = parsed.outputTokens;
}
currentType = parsed.newType;
if (parsed.finished) {
sawFinish = true;
@@ -220,6 +232,9 @@ function parseDeepSeekReplay(raw) {
events: events.length,
parsedChunks,
sawFinish,
parsedOutputTokens,
expectedOutputTokens,
tokenMismatch: expectedOutputTokens > 0 && parsedOutputTokens !== expectedOutputTokens,
outputText,
outputChars: outputText.length,
leakedFinishedText: outputText.includes('FINISHED'),
@@ -228,6 +243,52 @@ function parseDeepSeekReplay(raw) {
};
}
// Recursively scan a raw DeepSeek SSE chunk for the accumulated token usage.
// Accepts arrays, nested objects, a direct `accumulated_token_usage` field,
// or a patch-style `{ p, v }` pair whose path mentions that field. Returns
// the first positive integer found, or 0 when none is present.
function extractAccumulatedTokenUsageFromRawChunk(v) {
  if (Array.isArray(v)) {
    for (const entry of v) {
      const found = extractAccumulatedTokenUsageFromRawChunk(entry);
      if (found > 0) return found;
    }
    return 0;
  }
  if (v === null || v === undefined || typeof v !== 'object') return 0;

  // Direct field takes precedence over patch-style and nested matches.
  const direct = toTokenInt(v.accumulated_token_usage);
  if (direct > 0) return direct;

  const patchPath = typeof v.p === 'string' ? v.p.trim().toLowerCase() : '';
  if (patchPath.includes('accumulated_token_usage')) {
    const patched = toTokenInt(v.v);
    if (patched > 0) return patched;
  }

  // Fall back to a depth-first scan of every child value.
  for (const child of Object.values(v)) {
    const found = extractAccumulatedTokenUsageFromRawChunk(child);
    if (found > 0) return found;
  }
  return 0;
}

// Coerce a token count (number or numeric string) to a truncated integer;
// non-finite, blank, or non-numeric values map to 0.
function toTokenInt(v) {
  if (typeof v === 'number') {
    return Number.isFinite(v) ? Math.trunc(v) : 0;
  }
  if (typeof v !== 'string' || v.trim() === '') return 0;
  const parsed = Number(v);
  return Number.isFinite(parsed) ? Math.trunc(parsed) : 0;
}
function parseOpenAIStream(raw) {
const events = parseSSE(raw);
let outputText = '';
@@ -410,12 +471,18 @@ function replaySample(dir, opts) {
if (baselineResult && opts.failOnBaselineMismatch && !baselineMatch) {
errors.push('baseline output mismatch');
}
if (opts.failOnTokenMismatch && rawResult.tokenMismatch) {
errors.push(`token mismatch expected=${rawResult.expectedOutputTokens} parsed=${rawResult.parsedOutputTokens}`);
}
return {
sample_id: path.basename(dir),
raw_events: rawResult.events,
raw_parsed_chunks: rawResult.parsedChunks,
raw_saw_finish: rawResult.sawFinish,
raw_expected_output_tokens: rawResult.expectedOutputTokens,
raw_parsed_output_tokens: rawResult.parsedOutputTokens,
raw_token_mismatch: rawResult.tokenMismatch,
raw_output_chars: rawResult.outputChars,
raw_leaked_finished_text: rawResult.leakedFinishedText,
raw_leaked_reference_markers: rawResult.leakedReferenceMarkers,
@@ -485,6 +552,9 @@ function main() {
raw_events: sample.raw_events,
raw_parsed_chunks: sample.raw_parsed_chunks,
raw_saw_finish: sample.raw_saw_finish,
raw_expected_output_tokens: sample.raw_expected_output_tokens,
raw_parsed_output_tokens: sample.raw_parsed_output_tokens,
raw_token_mismatch: sample.raw_token_mismatch,
raw_output_chars: sample.raw_output_chars,
raw_leaked_finished_text: sample.raw_leaked_finished_text,
raw_leaked_reference_markers: sample.raw_leaked_reference_markers,
@@ -508,7 +578,7 @@ function main() {
? ` baseline=${sample.baseline_output_matches_replay ? 'match' : 'mismatch'}`
: ' baseline=missing';
const note = errors.length > 0 ? ` errors=${errors.join(';')}` : '';
console.log(`[sim] ${status} ${sample.sample_id} events=${sample.raw_events} parsed=${sample.raw_parsed_chunks} chars=${sample.raw_output_chars}${leakNote}${matchNote}${note}`);
console.log(`[sim] ${status} ${sample.sample_id} events=${sample.raw_events} parsed=${sample.raw_parsed_chunks} tokens=${sample.raw_parsed_output_tokens}/${sample.raw_expected_output_tokens} chars=${sample.raw_output_chars}${leakNote}${matchNote}${note}`);
if (opts.showOutput) {
console.log(`[sim] replay output for ${sample.sample_id}:`);
console.log(sample.replay_output_text || '(empty)');