mirror of
https://github.com/CJackHwang/ds2api.git
synced 2026-05-06 09:25:27 +08:00
Compare commits
35 Commits
v2.3.4_bet
...
v2.3.5
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b3eae22cef | ||
|
|
7af0098d1b | ||
|
|
17405be300 | ||
|
|
5bc03e5de6 | ||
|
|
5a5f93148d | ||
|
|
32dc5b6099 | ||
|
|
7936d4675f | ||
|
|
808eafa7c6 | ||
|
|
bcb8ed6df2 | ||
|
|
8ec5dcc0cc | ||
|
|
88a79f212d | ||
|
|
b1f8d6192f | ||
|
|
acfb3b225d | ||
|
|
99a6164000 | ||
|
|
e49d9d33e2 | ||
|
|
184a3d1e4e | ||
|
|
c4ec14f49a | ||
|
|
fb5fc0e885 | ||
|
|
20b603666d | ||
|
|
4d549b7102 | ||
|
|
33b0d1d144 | ||
|
|
41c0f7ce28 | ||
|
|
efb484ba4f | ||
|
|
145501d4a5 | ||
|
|
2d5103997b | ||
|
|
52e7e7aae8 | ||
|
|
5b5a4000d7 | ||
|
|
2bbf603148 | ||
|
|
d14b8a0664 | ||
|
|
f16e0b579e | ||
|
|
43cbc4aac0 | ||
|
|
cf569f4749 | ||
|
|
c9c59f2490 | ||
|
|
16216cc2ca | ||
|
|
7318d1f4a8 |
98
.env.example
98
.env.example
@@ -1,93 +1,15 @@
|
|||||||
# DS2API environment template (Go runtime)
|
# DS2API runtime
|
||||||
# Copy this file to .env and adjust values.
|
|
||||||
# Updated: 2026-02
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# Runtime
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# HTTP listen port (default: 5001)
|
|
||||||
PORT=5001
|
PORT=5001
|
||||||
|
|
||||||
# Log level: DEBUG | INFO | WARN | ERROR
|
|
||||||
LOG_LEVEL=INFO
|
LOG_LEVEL=INFO
|
||||||
|
|
||||||
# Max concurrent inflight requests per account in managed-key mode.
|
# Admin authentication
|
||||||
# Default: 2
|
DS2API_ADMIN_KEY=change-me
|
||||||
# Recommended client concurrency is calculated dynamically as:
|
|
||||||
# account_count * DS2API_ACCOUNT_MAX_INFLIGHT
|
|
||||||
# So by default it is account_count * 2.
|
|
||||||
# Requests beyond inflight slots enter a waiting queue first.
|
|
||||||
# Default queue size equals recommended concurrency, so 429 starts after:
|
|
||||||
# account_count * DS2API_ACCOUNT_MAX_INFLIGHT * 2
|
|
||||||
# Alias: DS2API_ACCOUNT_CONCURRENCY
|
|
||||||
# DS2API_ACCOUNT_MAX_INFLIGHT=2
|
|
||||||
|
|
||||||
# Optional waiting queue size override for managed-key mode.
|
# Config loading (choose one)
|
||||||
# Default: recommended_concurrency (same as account_count * inflight_limit)
|
# 1) file-based config
|
||||||
# Alias: DS2API_ACCOUNT_QUEUE_SIZE
|
DS2API_CONFIG_PATH=/app/config.json
|
||||||
# DS2API_ACCOUNT_MAX_QUEUE=10
|
# 2) inline JSON or Base64 JSON
|
||||||
|
# DS2API_CONFIG_JSON=
|
||||||
|
|
||||||
# ---------------------------------------------------------------
|
# Optional: static admin assets path
|
||||||
# Admin auth
|
# DS2API_STATIC_ADMIN_DIR=/app/static/admin
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# Admin key for /admin login and protected admin APIs.
|
|
||||||
# Default is "admin" when unset, but setting it explicitly is recommended.
|
|
||||||
DS2API_ADMIN_KEY=admin
|
|
||||||
|
|
||||||
# Optional JWT signing secret for admin token.
|
|
||||||
# Defaults to DS2API_ADMIN_KEY when unset.
|
|
||||||
# DS2API_JWT_SECRET=change-me
|
|
||||||
|
|
||||||
# Optional admin JWT validity in hours (default: 24)
|
|
||||||
# DS2API_JWT_EXPIRE_HOURS=24
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# Config source (choose one)
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# Option A: config file path (local/dev recommended)
|
|
||||||
# DS2API_CONFIG_PATH=config.json
|
|
||||||
|
|
||||||
# Option B: JSON string
|
|
||||||
# DS2API_CONFIG_JSON={"keys":["your-api-key"],"accounts":[{"email":"user@example.com","password":"xxx","token":""}]}
|
|
||||||
|
|
||||||
# Option C: Base64 encoded JSON (recommended for Vercel env var)
|
|
||||||
# DS2API_CONFIG_JSON=eyJrZXlzIjpbInlvdXItYXBpLWtleSJdLCJhY2NvdW50cyI6W3siZW1haWwiOiJ1c2VyQGV4YW1wbGUuY29tIiwicGFzc3dvcmQiOiJ4eHgiLCJ0b2tlbiI6IiJ9XX0=
|
|
||||||
#
|
|
||||||
# Generate from local config.json:
|
|
||||||
# DS2API_CONFIG_JSON="$(base64 < config.json | tr -d '\n')"
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# Paths (optional)
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# WASM file used for PoW solving
|
|
||||||
# DS2API_WASM_PATH=sha3_wasm_bg.7b9ca65ddd.wasm
|
|
||||||
|
|
||||||
# Built admin static assets directory
|
|
||||||
# DS2API_STATIC_ADMIN_DIR=static/admin
|
|
||||||
|
|
||||||
# Auto-build WebUI on startup when static/admin is missing.
|
|
||||||
# Default: enabled on local/Docker, disabled on Vercel.
|
|
||||||
# DS2API_AUTO_BUILD_WEBUI=true
|
|
||||||
|
|
||||||
# Internal auth secret used by the Vercel hybrid streaming path
|
|
||||||
# (Go prepare endpoint <-> Node stream function).
|
|
||||||
# Optional: falls back to DS2API_ADMIN_KEY when unset.
|
|
||||||
# DS2API_VERCEL_INTERNAL_SECRET=change-me
|
|
||||||
|
|
||||||
# Stream lease TTL seconds for Vercel hybrid streaming.
|
|
||||||
# During this window, the managed account stays occupied until Node calls release.
|
|
||||||
# Default: 900 (15 minutes)
|
|
||||||
# DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS=900
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# Vercel sync integration (optional)
|
|
||||||
# ---------------------------------------------------------------
|
|
||||||
# VERCEL_TOKEN=your-vercel-token
|
|
||||||
# VERCEL_PROJECT_ID=prj_xxxxxxxxxxxx
|
|
||||||
# VERCEL_TEAM_ID=team_xxxxxxxxxxxx
|
|
||||||
|
|
||||||
# Optional: Vercel deployment protection bypass secret.
|
|
||||||
# If deployment protection is enabled, DS2API will use this value as
|
|
||||||
# x-vercel-protection-bypass for internal Node->Go calls on Vercel.
|
|
||||||
# You can also use VERCEL_AUTOMATION_BYPASS_SECRET directly.
|
|
||||||
# DS2API_VERCEL_PROTECTION_BYPASS=your-bypass-secret
|
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ ds2api/
|
|||||||
├── api/
|
├── api/
|
||||||
│ ├── index.go # Vercel Serverless Go entry
|
│ ├── index.go # Vercel Serverless Go entry
|
||||||
│ ├── chat-stream.js # Vercel Node.js stream relay
|
│ ├── chat-stream.js # Vercel Node.js stream relay
|
||||||
│ └── helpers/ # Node.js helper modules
|
│ └── (rewrite targets in vercel.json)
|
||||||
├── internal/
|
├── internal/
|
||||||
│ ├── account/ # Account pool and concurrency queue
|
│ ├── account/ # Account pool and concurrency queue
|
||||||
│ ├── adapter/
|
│ ├── adapter/
|
||||||
@@ -112,6 +112,7 @@ ds2api/
|
|||||||
│ ├── compat/ # Compatibility helpers
|
│ ├── compat/ # Compatibility helpers
|
||||||
│ ├── config/ # Config loading and hot-reload
|
│ ├── config/ # Config loading and hot-reload
|
||||||
│ ├── deepseek/ # DeepSeek client, PoW WASM
|
│ ├── deepseek/ # DeepSeek client, PoW WASM
|
||||||
|
│ ├── js/ # Node runtime stream/compat logic
|
||||||
│ ├── devcapture/ # Dev packet capture
|
│ ├── devcapture/ # Dev packet capture
|
||||||
│ ├── format/ # Output formatting
|
│ ├── format/ # Output formatting
|
||||||
│ ├── prompt/ # Prompt building
|
│ ├── prompt/ # Prompt building
|
||||||
@@ -123,7 +124,9 @@ ds2api/
|
|||||||
│ └── webui/ # WebUI static hosting
|
│ └── webui/ # WebUI static hosting
|
||||||
├── webui/ # React WebUI source
|
├── webui/ # React WebUI source
|
||||||
│ └── src/
|
│ └── src/
|
||||||
│ ├── components/ # Components
|
│ ├── app/ # Routing, auth, config state
|
||||||
|
│ ├── features/ # Feature modules
|
||||||
|
│ ├── components/ # Shared components
|
||||||
│ └── locales/ # Language packs
|
│ └── locales/ # Language packs
|
||||||
├── scripts/ # Build and test scripts
|
├── scripts/ # Build and test scripts
|
||||||
├── static/admin/ # WebUI build output (not committed)
|
├── static/admin/ # WebUI build output (not committed)
|
||||||
|
|||||||
@@ -99,7 +99,7 @@ ds2api/
|
|||||||
├── api/
|
├── api/
|
||||||
│ ├── index.go # Vercel Serverless Go 入口
|
│ ├── index.go # Vercel Serverless Go 入口
|
||||||
│ ├── chat-stream.js # Vercel Node.js 流式转发
|
│ ├── chat-stream.js # Vercel Node.js 流式转发
|
||||||
│ └── helpers/ # Node.js 辅助模块
|
│ └── (rewrite targets in vercel.json)
|
||||||
├── internal/
|
├── internal/
|
||||||
│ ├── account/ # 账号池与并发队列
|
│ ├── account/ # 账号池与并发队列
|
||||||
│ ├── adapter/
|
│ ├── adapter/
|
||||||
@@ -112,6 +112,7 @@ ds2api/
|
|||||||
│ ├── compat/ # 兼容性辅助
|
│ ├── compat/ # 兼容性辅助
|
||||||
│ ├── config/ # 配置加载与热更新
|
│ ├── config/ # 配置加载与热更新
|
||||||
│ ├── deepseek/ # DeepSeek 客户端、PoW WASM
|
│ ├── deepseek/ # DeepSeek 客户端、PoW WASM
|
||||||
|
│ ├── js/ # Node 运行时流式/兼容逻辑
|
||||||
│ ├── devcapture/ # 开发抓包
|
│ ├── devcapture/ # 开发抓包
|
||||||
│ ├── format/ # 输出格式化
|
│ ├── format/ # 输出格式化
|
||||||
│ ├── prompt/ # Prompt 构建
|
│ ├── prompt/ # Prompt 构建
|
||||||
@@ -123,7 +124,9 @@ ds2api/
|
|||||||
│ └── webui/ # WebUI 静态托管
|
│ └── webui/ # WebUI 静态托管
|
||||||
├── webui/ # React WebUI 源码
|
├── webui/ # React WebUI 源码
|
||||||
│ └── src/
|
│ └── src/
|
||||||
│ ├── components/ # 组件
|
│ ├── app/ # 路由、鉴权、配置状态
|
||||||
|
│ ├── features/ # 业务功能模块
|
||||||
|
│ ├── components/ # 通用组件
|
||||||
│ └── locales/ # 语言包
|
│ └── locales/ # 语言包
|
||||||
├── scripts/ # 构建与测试脚本
|
├── scripts/ # 构建与测试脚本
|
||||||
├── static/admin/ # WebUI 构建产物(不提交)
|
├── static/admin/ # WebUI 构建产物(不提交)
|
||||||
|
|||||||
12
DEPLOY.en.md
12
DEPLOY.en.md
@@ -113,12 +113,8 @@ go build -o ds2api ./cmd/ds2api
|
|||||||
# Copy env template
|
# Copy env template
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
|
||||||
# Generate single-line Base64 from config.json
|
# Edit .env and set at least:
|
||||||
DS2API_CONFIG_JSON="$(base64 < config.json | tr -d '\n')"
|
|
||||||
|
|
||||||
# Edit .env and set:
|
|
||||||
# DS2API_ADMIN_KEY=your-admin-key
|
# DS2API_ADMIN_KEY=your-admin-key
|
||||||
# DS2API_CONFIG_JSON=${DS2API_CONFIG_JSON}
|
|
||||||
|
|
||||||
# Start
|
# Start
|
||||||
docker-compose up -d
|
docker-compose up -d
|
||||||
@@ -366,7 +362,7 @@ Each archive includes:
|
|||||||
|
|
||||||
- `ds2api` executable (`ds2api.exe` on Windows)
|
- `ds2api` executable (`ds2api.exe` on Windows)
|
||||||
- `static/admin/` (built WebUI assets)
|
- `static/admin/` (built WebUI assets)
|
||||||
- `sha3_wasm_bg.7b9ca65ddd.wasm`
|
- `sha3_wasm_bg.7b9ca65ddd.wasm` (optional; binary has embedded fallback)
|
||||||
- `config.example.json`, `.env.example`
|
- `config.example.json`, `.env.example`
|
||||||
- `README.MD`, `README.en.md`, `LICENSE`
|
- `README.MD`, `README.en.md`, `LICENSE`
|
||||||
|
|
||||||
@@ -455,7 +451,9 @@ server {
|
|||||||
```bash
|
```bash
|
||||||
# Copy compiled binary and related files to target directory
|
# Copy compiled binary and related files to target directory
|
||||||
sudo mkdir -p /opt/ds2api
|
sudo mkdir -p /opt/ds2api
|
||||||
sudo cp ds2api config.json sha3_wasm_bg.7b9ca65ddd.wasm /opt/ds2api/
|
sudo cp ds2api config.json /opt/ds2api/
|
||||||
|
# Optional: if you want to use an external WASM file (override embedded one)
|
||||||
|
# sudo cp sha3_wasm_bg.7b9ca65ddd.wasm /opt/ds2api/
|
||||||
sudo cp -r static/admin /opt/ds2api/static/admin
|
sudo cp -r static/admin /opt/ds2api/static/admin
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
12
DEPLOY.md
12
DEPLOY.md
@@ -113,12 +113,8 @@ go build -o ds2api ./cmd/ds2api
|
|||||||
# 复制环境变量模板
|
# 复制环境变量模板
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
|
||||||
# 从 config.json 生成单行 Base64
|
# 编辑 .env(请改成你的强密码),至少设置:
|
||||||
DS2API_CONFIG_JSON="$(base64 < config.json | tr -d '\n')"
|
|
||||||
|
|
||||||
# 编辑 .env(请改成你的强密码),设置:
|
|
||||||
# DS2API_ADMIN_KEY=your-admin-key
|
# DS2API_ADMIN_KEY=your-admin-key
|
||||||
# DS2API_CONFIG_JSON=${DS2API_CONFIG_JSON}
|
|
||||||
|
|
||||||
# 启动
|
# 启动
|
||||||
docker-compose up -d
|
docker-compose up -d
|
||||||
@@ -366,7 +362,7 @@ No Output Directory named "public" found after the Build completed.
|
|||||||
|
|
||||||
- `ds2api` 可执行文件(Windows 为 `ds2api.exe`)
|
- `ds2api` 可执行文件(Windows 为 `ds2api.exe`)
|
||||||
- `static/admin/`(WebUI 构建产物)
|
- `static/admin/`(WebUI 构建产物)
|
||||||
- `sha3_wasm_bg.7b9ca65ddd.wasm`
|
- `sha3_wasm_bg.7b9ca65ddd.wasm`(可选;程序内置 embed fallback)
|
||||||
- `config.example.json`、`.env.example`
|
- `config.example.json`、`.env.example`
|
||||||
- `README.MD`、`README.en.md`、`LICENSE`
|
- `README.MD`、`README.en.md`、`LICENSE`
|
||||||
|
|
||||||
@@ -455,7 +451,9 @@ server {
|
|||||||
```bash
|
```bash
|
||||||
# 将编译好的二进制文件和相关文件复制到目标目录
|
# 将编译好的二进制文件和相关文件复制到目标目录
|
||||||
sudo mkdir -p /opt/ds2api
|
sudo mkdir -p /opt/ds2api
|
||||||
sudo cp ds2api config.json sha3_wasm_bg.7b9ca65ddd.wasm /opt/ds2api/
|
sudo cp ds2api config.json /opt/ds2api/
|
||||||
|
# 可选:若你希望使用外置 WASM 文件(覆盖内置版本)
|
||||||
|
# sudo cp sha3_wasm_bg.7b9ca65ddd.wasm /opt/ds2api/
|
||||||
sudo cp -r static/admin /opt/ds2api/static/admin
|
sudo cp -r static/admin /opt/ds2api/static/admin
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
36
README.MD
36
README.MD
@@ -160,17 +160,13 @@ go run ./cmd/ds2api
|
|||||||
# 1. 准备环境变量文件
|
# 1. 准备环境变量文件
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
|
||||||
# 2. 从 config.json 生成 DS2API_CONFIG_JSON(单行 Base64)
|
# 2. 编辑 .env(至少设置 DS2API_ADMIN_KEY)
|
||||||
DS2API_CONFIG_JSON="$(base64 < config.json | tr -d '\n')"
|
|
||||||
|
|
||||||
# 3. 编辑 .env,设置:
|
|
||||||
# DS2API_ADMIN_KEY=请替换为强密码
|
# DS2API_ADMIN_KEY=请替换为强密码
|
||||||
# DS2API_CONFIG_JSON=${DS2API_CONFIG_JSON}
|
|
||||||
|
|
||||||
# 4. 启动
|
# 3. 启动
|
||||||
docker-compose up -d
|
docker-compose up -d
|
||||||
|
|
||||||
# 5. 查看日志
|
# 4. 查看日志
|
||||||
docker-compose logs -f
|
docker-compose logs -f
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -397,7 +393,7 @@ ds2api/
|
|||||||
├── api/
|
├── api/
|
||||||
│ ├── index.go # Vercel Serverless Go 入口
|
│ ├── index.go # Vercel Serverless Go 入口
|
||||||
│ ├── chat-stream.js # Vercel Node.js 流式转发
|
│ ├── chat-stream.js # Vercel Node.js 流式转发
|
||||||
│ └── helpers/ # Node.js 辅助模块
|
│ └── (rewrite targets in vercel.json)
|
||||||
├── internal/
|
├── internal/
|
||||||
│ ├── account/ # 账号池与并发队列
|
│ ├── account/ # 账号池与并发队列
|
||||||
│ ├── adapter/
|
│ ├── adapter/
|
||||||
@@ -410,6 +406,7 @@ ds2api/
|
|||||||
│ ├── compat/ # 兼容性辅助
|
│ ├── compat/ # 兼容性辅助
|
||||||
│ ├── config/ # 配置加载与热更新
|
│ ├── config/ # 配置加载与热更新
|
||||||
│ ├── deepseek/ # DeepSeek API 客户端、PoW WASM
|
│ ├── deepseek/ # DeepSeek API 客户端、PoW WASM
|
||||||
|
│ ├── js/ # Node 运行时流式处理与兼容逻辑
|
||||||
│ ├── devcapture/ # 开发抓包模块
|
│ ├── devcapture/ # 开发抓包模块
|
||||||
│ ├── format/ # 输出格式化
|
│ ├── format/ # 输出格式化
|
||||||
│ ├── prompt/ # Prompt 构建
|
│ ├── prompt/ # Prompt 构建
|
||||||
@@ -420,7 +417,9 @@ ds2api/
|
|||||||
│ └── webui/ # WebUI 静态文件托管与自动构建
|
│ └── webui/ # WebUI 静态文件托管与自动构建
|
||||||
├── webui/ # React WebUI 源码(Vite + Tailwind)
|
├── webui/ # React WebUI 源码(Vite + Tailwind)
|
||||||
│ └── src/
|
│ └── src/
|
||||||
│ ├── components/ # AccountManager / ApiTester / BatchImport / VercelSync / Login / LandingPage
|
│ ├── app/ # 路由、鉴权、配置状态管理
|
||||||
|
│ ├── features/ # 业务功能模块(account/settings/vercel/apiTester)
|
||||||
|
│ ├── components/ # 登录/落地页等通用组件
|
||||||
│ └── locales/ # 中英文语言包(zh.json / en.json)
|
│ └── locales/ # 中英文语言包(zh.json / en.json)
|
||||||
├── scripts/
|
├── scripts/
|
||||||
│ └── build-webui.sh # WebUI 手动构建脚本
|
│ └── build-webui.sh # WebUI 手动构建脚本
|
||||||
@@ -476,6 +475,23 @@ go run ./cmd/ds2api-tests \
|
|||||||
npm ci --prefix webui && npm run build --prefix webui
|
npm ci --prefix webui && npm run build --prefix webui
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## 测试
|
||||||
|
|
||||||
|
详细测试指南请参阅 [TESTING.md](TESTING.md)。
|
||||||
|
|
||||||
|
### 快速测试命令
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 运行所有单元测试
|
||||||
|
go test ./...
|
||||||
|
|
||||||
|
# 运行 tool calls 相关测试(调试工具调用问题)
|
||||||
|
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/util/
|
||||||
|
|
||||||
|
# 运行端到端测试
|
||||||
|
./tests/scripts/run-live.sh
|
||||||
|
```
|
||||||
|
|
||||||
## Release 自动构建(GitHub Actions)
|
## Release 自动构建(GitHub Actions)
|
||||||
|
|
||||||
工作流文件:`.github/workflows/release-artifacts.yml`
|
工作流文件:`.github/workflows/release-artifacts.yml`
|
||||||
@@ -483,7 +499,7 @@ npm ci --prefix webui && npm run build --prefix webui
|
|||||||
- **触发条件**:仅在 GitHub Release `published` 时触发(普通 push 不会触发)
|
- **触发条件**:仅在 GitHub Release `published` 时触发(普通 push 不会触发)
|
||||||
- **构建产物**:多平台二进制包(`linux/amd64`、`linux/arm64`、`darwin/amd64`、`darwin/arm64`、`windows/amd64`)+ `sha256sums.txt`
|
- **构建产物**:多平台二进制包(`linux/amd64`、`linux/arm64`、`darwin/amd64`、`darwin/arm64`、`windows/amd64`)+ `sha256sums.txt`
|
||||||
- **容器镜像发布**:仅推送到 GHCR(`ghcr.io/cjackhwang/ds2api`)
|
- **容器镜像发布**:仅推送到 GHCR(`ghcr.io/cjackhwang/ds2api`)
|
||||||
- **每个压缩包包含**:`ds2api` 可执行文件、`static/admin`、WASM 文件、配置示例、README、LICENSE
|
- **每个压缩包包含**:`ds2api` 可执行文件、`static/admin`、WASM 文件(同时支持内置 fallback)、配置示例、README、LICENSE
|
||||||
|
|
||||||
## 免责声明
|
## 免责声明
|
||||||
|
|
||||||
|
|||||||
19
README.en.md
19
README.en.md
@@ -160,17 +160,13 @@ Default URL: `http://localhost:5001`
|
|||||||
# 1. Prepare env file
|
# 1. Prepare env file
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
|
||||||
# 2. Generate DS2API_CONFIG_JSON from config.json (single-line Base64)
|
# 2. Edit .env (at least set DS2API_ADMIN_KEY)
|
||||||
DS2API_CONFIG_JSON="$(base64 < config.json | tr -d '\n')"
|
|
||||||
|
|
||||||
# 3. Edit .env and set:
|
|
||||||
# DS2API_ADMIN_KEY=replace-with-a-strong-secret
|
# DS2API_ADMIN_KEY=replace-with-a-strong-secret
|
||||||
# DS2API_CONFIG_JSON=${DS2API_CONFIG_JSON}
|
|
||||||
|
|
||||||
# 4. Start
|
# 3. Start
|
||||||
docker-compose up -d
|
docker-compose up -d
|
||||||
|
|
||||||
# 5. View logs
|
# 4. View logs
|
||||||
docker-compose logs -f
|
docker-compose logs -f
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -398,7 +394,7 @@ ds2api/
|
|||||||
├── api/
|
├── api/
|
||||||
│ ├── index.go # Vercel Serverless Go entry
|
│ ├── index.go # Vercel Serverless Go entry
|
||||||
│ ├── chat-stream.js # Vercel Node.js stream relay
|
│ ├── chat-stream.js # Vercel Node.js stream relay
|
||||||
│ └── helpers/ # Node.js helper modules
|
│ └── (rewrite targets in vercel.json)
|
||||||
├── internal/
|
├── internal/
|
||||||
│ ├── account/ # Account pool and concurrency queue
|
│ ├── account/ # Account pool and concurrency queue
|
||||||
│ ├── adapter/
|
│ ├── adapter/
|
||||||
@@ -411,6 +407,7 @@ ds2api/
|
|||||||
│ ├── compat/ # Compatibility helpers
|
│ ├── compat/ # Compatibility helpers
|
||||||
│ ├── config/ # Config loading and hot-reload
|
│ ├── config/ # Config loading and hot-reload
|
||||||
│ ├── deepseek/ # DeepSeek API client, PoW WASM
|
│ ├── deepseek/ # DeepSeek API client, PoW WASM
|
||||||
|
│ ├── js/ # Node runtime stream/compat logic
|
||||||
│ ├── devcapture/ # Dev packet capture module
|
│ ├── devcapture/ # Dev packet capture module
|
||||||
│ ├── format/ # Output formatting
|
│ ├── format/ # Output formatting
|
||||||
│ ├── prompt/ # Prompt construction
|
│ ├── prompt/ # Prompt construction
|
||||||
@@ -421,7 +418,9 @@ ds2api/
|
|||||||
│ └── webui/ # WebUI static file serving and auto-build
|
│ └── webui/ # WebUI static file serving and auto-build
|
||||||
├── webui/ # React WebUI source (Vite + Tailwind)
|
├── webui/ # React WebUI source (Vite + Tailwind)
|
||||||
│ └── src/
|
│ └── src/
|
||||||
│ ├── components/ # AccountManager / ApiTester / BatchImport / VercelSync / Login / LandingPage
|
│ ├── app/ # Routing, auth, config state
|
||||||
|
│ ├── features/ # Feature modules (account/settings/vercel/apiTester)
|
||||||
|
│ ├── components/ # Shared UI pieces (login/landing, etc.)
|
||||||
│ └── locales/ # Language packs (zh.json / en.json)
|
│ └── locales/ # Language packs (zh.json / en.json)
|
||||||
├── scripts/
|
├── scripts/
|
||||||
│ └── build-webui.sh # Manual WebUI build script
|
│ └── build-webui.sh # Manual WebUI build script
|
||||||
@@ -484,7 +483,7 @@ Workflow: `.github/workflows/release-artifacts.yml`
|
|||||||
- **Trigger**: only on GitHub Release `published` (normal pushes do not trigger builds)
|
- **Trigger**: only on GitHub Release `published` (normal pushes do not trigger builds)
|
||||||
- **Outputs**: multi-platform archives (`linux/amd64`, `linux/arm64`, `darwin/amd64`, `darwin/arm64`, `windows/amd64`) + `sha256sums.txt`
|
- **Outputs**: multi-platform archives (`linux/amd64`, `linux/arm64`, `darwin/amd64`, `darwin/arm64`, `windows/amd64`) + `sha256sums.txt`
|
||||||
- **Container publishing**: GHCR only (`ghcr.io/cjackhwang/ds2api`)
|
- **Container publishing**: GHCR only (`ghcr.io/cjackhwang/ds2api`)
|
||||||
- **Each archive includes**: `ds2api` executable, `static/admin`, WASM file, config template, README, LICENSE
|
- **Each archive includes**: `ds2api` executable, `static/admin`, WASM file (with embedded fallback support), config template, README, LICENSE
|
||||||
|
|
||||||
## Disclaimer
|
## Disclaimer
|
||||||
|
|
||||||
|
|||||||
46
TESTING.md
46
TESTING.md
@@ -51,7 +51,7 @@ DS2API 提供两个层级的测试:
|
|||||||
1. **Preflight 检查**:
|
1. **Preflight 检查**:
|
||||||
- `go test ./... -count=1`(单元测试)
|
- `go test ./... -count=1`(单元测试)
|
||||||
- `./tests/scripts/check-node-split-syntax.sh`(Node 拆分模块语法门禁)
|
- `./tests/scripts/check-node-split-syntax.sh`(Node 拆分模块语法门禁)
|
||||||
- `node --test`(如仓库存在 Node 单测文件时执行;当前默认以 Go 测试 + Node 语法门禁为主)
|
- `node --test tests/node/stream-tool-sieve.test.js tests/node/chat-stream.test.js tests/node/js_compat_test.js`
|
||||||
- `npm run build --prefix webui`(WebUI 构建检查)
|
- `npm run build --prefix webui`(WebUI 构建检查)
|
||||||
|
|
||||||
2. **隔离启动**:复制 `config.json` 到临时目录,启动独立服务进程
|
2. **隔离启动**:复制 `config.json` 到临时目录,启动独立服务进程
|
||||||
@@ -173,6 +173,50 @@ rg "<trace_id>" artifacts/testsuite/<run_id>/server.log
|
|||||||
go test ./...
|
go test ./...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### 运行特定模块的单元测试
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 运行 tool calls 相关测试(推荐用于调试 tool call 解析问题)
|
||||||
|
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/util/
|
||||||
|
|
||||||
|
# 运行单个测试用例
|
||||||
|
go test -v -run TestParseToolCallsWithDeepSeekHallucination ./internal/util/
|
||||||
|
|
||||||
|
# 运行 format 相关测试
|
||||||
|
go test -v ./internal/format/...
|
||||||
|
|
||||||
|
# 运行 adapter 相关测试
|
||||||
|
go test -v ./internal/adapter/openai/...
|
||||||
|
```
|
||||||
|
|
||||||
|
### 调试 Tool Call 问题 | Debugging Tool Call Issues
|
||||||
|
|
||||||
|
当遇到 DeepSeek 工具调用解析问题时,可以使用以下方法:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. 运行 tool calls 相关的所有测试
|
||||||
|
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/util/
|
||||||
|
|
||||||
|
# 2. 查看测试输出中的详细调试信息
|
||||||
|
go test -v -run TestParseToolCallsWithDeepSeekHallucination ./internal/util/ 2>&1
|
||||||
|
|
||||||
|
# 3. 检查具体测试用例的修复效果
|
||||||
|
# 测试用例位于 internal/util/toolcalls_test.go,包含:
|
||||||
|
# - TestParseToolCallsWithDeepSeekHallucination: DeepSeek 典型幻觉输出
|
||||||
|
# - TestRepairLooseJSONWithNestedObjects: 嵌套对象的方括号修复
|
||||||
|
# - TestParseToolCallsWithMixedWindowsPaths: Windows 路径处理
|
||||||
|
```
|
||||||
|
|
||||||
|
### 运行 Node.js 测试
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 运行 Node 测试
|
||||||
|
node --test tests/node/stream-tool-sieve.test.js
|
||||||
|
|
||||||
|
# 或使用脚本
|
||||||
|
./tests/scripts/run-unit-node.sh
|
||||||
|
```
|
||||||
|
|
||||||
### 跑端到端测试(跳过 preflight)
|
### 跑端到端测试(跳过 preflight)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
|||||||
@@ -358,7 +358,7 @@ func TestHandleClaudeStreamRealtimeToolSafetyAcrossStructuredFormats(t *testing.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHandleClaudeStreamRealtimeDoesNotStopOnUnclosedFencedToolExample(t *testing.T) {
|
func TestHandleClaudeStreamRealtimePromotesUnclosedFencedToolExample(t *testing.T) {
|
||||||
h := &Handler{}
|
h := &Handler{}
|
||||||
resp := makeClaudeSSEHTTPResponse(
|
resp := makeClaudeSSEHTTPResponse(
|
||||||
"data: {\"p\":\"response/content\",\"v\":\"Here is an example:\\n```json\\n{\\\"tool_calls\\\":[{\\\"name\\\":\\\"Bash\\\",\\\"input\\\":{\\\"command\\\":\\\"pwd\\\"}}]}\"}",
|
"data: {\"p\":\"response/content\",\"v\":\"Here is an example:\\n```json\\n{\\\"tool_calls\\\":[{\\\"name\\\":\\\"Bash\\\",\\\"input\\\":{\\\"command\\\":\\\"pwd\\\"}}]}\"}",
|
||||||
@@ -371,22 +371,27 @@ func TestHandleClaudeStreamRealtimeDoesNotStopOnUnclosedFencedToolExample(t *tes
|
|||||||
h.handleClaudeStreamRealtime(rec, req, resp, "claude-sonnet-4-5", []any{map[string]any{"role": "user", "content": "show example only"}}, false, false, []string{"Bash"})
|
h.handleClaudeStreamRealtime(rec, req, resp, "claude-sonnet-4-5", []any{map[string]any{"role": "user", "content": "show example only"}}, false, false, []string{"Bash"})
|
||||||
|
|
||||||
frames := parseClaudeFrames(t, rec.Body.String())
|
frames := parseClaudeFrames(t, rec.Body.String())
|
||||||
|
foundToolUse := false
|
||||||
for _, f := range findClaudeFrames(frames, "content_block_start") {
|
for _, f := range findClaudeFrames(frames, "content_block_start") {
|
||||||
contentBlock, _ := f.Payload["content_block"].(map[string]any)
|
contentBlock, _ := f.Payload["content_block"].(map[string]any)
|
||||||
if contentBlock["type"] == "tool_use" {
|
if contentBlock["type"] == "tool_use" {
|
||||||
t.Fatalf("unexpected tool_use for fenced example, body=%s", rec.Body.String())
|
foundToolUse = true
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
foundEndTurn := false
|
|
||||||
for _, f := range findClaudeFrames(frames, "message_delta") {
|
|
||||||
delta, _ := f.Payload["delta"].(map[string]any)
|
|
||||||
if delta["stop_reason"] == "end_turn" {
|
|
||||||
foundEndTurn = true
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !foundEndTurn {
|
if !foundToolUse {
|
||||||
t.Fatalf("expected stop_reason=end_turn, body=%s", rec.Body.String())
|
t.Fatalf("expected tool_use for fenced example, body=%s", rec.Body.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
foundToolStop := false
|
||||||
|
for _, f := range findClaudeFrames(frames, "message_delta") {
|
||||||
|
delta, _ := f.Payload["delta"].(map[string]any)
|
||||||
|
if delta["stop_reason"] == "tool_use" {
|
||||||
|
foundToolStop = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !foundToolStop {
|
||||||
|
t.Fatalf("expected stop_reason=tool_use, body=%s", rec.Body.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -98,11 +98,11 @@ func (s *chatStreamRuntime) sendDone() {
|
|||||||
func (s *chatStreamRuntime) finalize(finishReason string) {
|
func (s *chatStreamRuntime) finalize(finishReason string) {
|
||||||
finalThinking := s.thinking.String()
|
finalThinking := s.thinking.String()
|
||||||
finalText := s.text.String()
|
finalText := s.text.String()
|
||||||
detected := util.ParseStandaloneToolCalls(finalText, s.toolNames)
|
detected := util.ParseStandaloneToolCallsDetailed(finalText, s.toolNames)
|
||||||
if len(detected) > 0 && !s.toolCallsDoneEmitted {
|
if len(detected.Calls) > 0 && !s.toolCallsDoneEmitted {
|
||||||
finishReason = "tool_calls"
|
finishReason = "tool_calls"
|
||||||
delta := map[string]any{
|
delta := map[string]any{
|
||||||
"tool_calls": formatFinalStreamToolCallsWithStableIDs(detected, s.streamToolCallIDs),
|
"tool_calls": formatFinalStreamToolCallsWithStableIDs(detected.Calls, s.streamToolCallIDs),
|
||||||
}
|
}
|
||||||
if !s.firstChunkSent {
|
if !s.firstChunkSent {
|
||||||
delta["role"] = "assistant"
|
delta["role"] = "assistant"
|
||||||
@@ -158,7 +158,7 @@ func (s *chatStreamRuntime) finalize(finishReason string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(detected) > 0 || s.toolCallsEmitted {
|
if len(detected.Calls) > 0 || s.toolCallsEmitted {
|
||||||
finishReason = "tool_calls"
|
finishReason = "tool_calls"
|
||||||
}
|
}
|
||||||
s.sendChunk(openaifmt.BuildChatStreamChunk(
|
s.sendChunk(openaifmt.BuildChatStreamChunk(
|
||||||
|
|||||||
@@ -42,7 +42,9 @@ func (h *Handler) ChatCompletions(w http.ResponseWriter, r *http.Request) {
|
|||||||
// 2. 新请求可能获取到同一账号并开始使用
|
// 2. 新请求可能获取到同一账号并开始使用
|
||||||
// 3. 异步删除仍在进行,会截断新请求正在使用的会话
|
// 3. 异步删除仍在进行,会截断新请求正在使用的会话
|
||||||
if h.Store.AutoDeleteSessions() && a.DeepSeekToken != "" {
|
if h.Store.AutoDeleteSessions() && a.DeepSeekToken != "" {
|
||||||
err := h.DS.DeleteAllSessionsForToken(context.Background(), a.DeepSeekToken)
|
deleteCtx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
err := h.DS.DeleteAllSessionsForToken(deleteCtx, a.DeepSeekToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
config.Logger.Warn("[auto_delete_sessions] failed", "account", a.AccountID, "error", err)
|
config.Logger.Warn("[auto_delete_sessions] failed", "account", a.AccountID, "error", err)
|
||||||
} else {
|
} else {
|
||||||
@@ -51,7 +53,7 @@ func (h *Handler) ChatCompletions(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
h.Auth.Release(a)
|
h.Auth.Release(a)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
r = r.WithContext(auth.WithAuth(r.Context(), a))
|
r = r.WithContext(auth.WithAuth(r.Context(), a))
|
||||||
|
|
||||||
var req map[string]any
|
var req map[string]any
|
||||||
|
|||||||
@@ -53,7 +53,7 @@ func injectToolPrompt(messages []map[string]any, tools []any, policy util.ToolCh
|
|||||||
if len(toolSchemas) == 0 {
|
if len(toolSchemas) == 0 {
|
||||||
return messages, names
|
return messages, names
|
||||||
}
|
}
|
||||||
toolPrompt := "You have access to these tools:\n\n" + strings.Join(toolSchemas, "\n\n") + "\n\nWhen you need to use tools, output ONLY this JSON format (no other text):\n{\"tool_calls\": [{\"name\": \"tool_name\", \"input\": {\"param\": \"value\"}}]}\n\nHistory markers in conversation:\n- [TOOL_CALL_HISTORY]...[/TOOL_CALL_HISTORY] means a tool call you already made earlier.\n- [TOOL_RESULT_HISTORY]...[/TOOL_RESULT_HISTORY] means the runtime returned a tool result (not user input).\n\nIMPORTANT:\n1) If calling tools, output ONLY the JSON. The response must start with { and end with }.\n2) After receiving a tool result, you MUST use it to produce the final answer.\n3) Only call another tool when the previous result is missing required data or returned an error.\n4) Do not repeat a tool call that is already satisfied by an existing [TOOL_RESULT_HISTORY] block."
|
toolPrompt := "You have access to these tools:\n\n" + strings.Join(toolSchemas, "\n\n") + "\n\nWhen you need to use tools, output ONLY a JSON code block like this:\n```json\n{\"tool_calls\": [{\"name\": \"tool_name\", \"input\": {\"param\": \"value\"}}]}\n```\n\n【EXAMPLE】\nUser: Please check the weather in Beijing and Shanghai, and update my todo list.\nAssistant:\n```json\n{\"tool_calls\": [\n {\"name\": \"get_weather\", \"input\": {\"city\": \"Beijing\"}},\n {\"name\": \"get_weather\", \"input\": {\"city\": \"Shanghai\"}},\n {\"name\": \"update_todo\", \"input\": {\"todos\": [{\"content\": \"Buy milk\"}, {\"content\": \"Write report\"}]}}\n]}\n```\n\nHistory markers in conversation:\n- [TOOL_CALL_HISTORY]...[/TOOL_CALL_HISTORY] means a tool call you already made earlier.\n- [TOOL_RESULT_HISTORY]...[/TOOL_RESULT_HISTORY] means the runtime returned a tool result (not user input).\n\nIMPORTANT:\n1) If calling tools, output ONLY the JSON code block. The response must start with ```json and end with ```.\n2) After receiving a tool result, you MUST use it to produce the final answer.\n3) Only call another tool when the previous result is missing required data or returned an error.\n4) Do not repeat a tool call that is already satisfied by an existing [TOOL_RESULT_HISTORY] block.\n5) JSON SYNTAX STRICTLY REQUIRED: All property names MUST be enclosed in double quotes (e.g., \"name\", not name).\n6) ARRAY FORMAT: If providing a list of items, you MUST enclose them in square brackets `[]` (e.g., \"todos\": [{\"item\": \"a\"}, {\"item\": \"b\"}]). DO NOT output comma-separated objects without brackets."
|
||||||
if policy.Mode == util.ToolChoiceRequired {
|
if policy.Mode == util.ToolChoiceRequired {
|
||||||
toolPrompt += "\n5) For this response, you MUST call at least one tool from the allowed list."
|
toolPrompt += "\n5) For this response, you MUST call at least one tool from the allowed list."
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -211,7 +211,7 @@ func TestHandleNonStreamUnknownToolNotIntercepted(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHandleNonStreamEmbeddedToolCallExampleRemainsText(t *testing.T) {
|
func TestHandleNonStreamEmbeddedToolCallExamplePromotesToolCall(t *testing.T) {
|
||||||
h := &Handler{}
|
h := &Handler{}
|
||||||
resp := makeSSEHTTPResponse(
|
resp := makeSSEHTTPResponse(
|
||||||
`data: {"p":"response/content","v":"下面是示例:"}`,
|
`data: {"p":"response/content","v":"下面是示例:"}`,
|
||||||
@@ -229,20 +229,21 @@ func TestHandleNonStreamEmbeddedToolCallExampleRemainsText(t *testing.T) {
|
|||||||
out := decodeJSONBody(t, rec.Body.String())
|
out := decodeJSONBody(t, rec.Body.String())
|
||||||
choices, _ := out["choices"].([]any)
|
choices, _ := out["choices"].([]any)
|
||||||
choice, _ := choices[0].(map[string]any)
|
choice, _ := choices[0].(map[string]any)
|
||||||
if choice["finish_reason"] != "stop" {
|
if choice["finish_reason"] != "tool_calls" {
|
||||||
t.Fatalf("expected finish_reason=stop, got %#v", choice["finish_reason"])
|
t.Fatalf("expected finish_reason=tool_calls, got %#v", choice["finish_reason"])
|
||||||
}
|
}
|
||||||
msg, _ := choice["message"].(map[string]any)
|
msg, _ := choice["message"].(map[string]any)
|
||||||
if _, ok := msg["tool_calls"]; ok {
|
toolCalls, _ := msg["tool_calls"].([]any)
|
||||||
t.Fatalf("did not expect tool_calls field for embedded example: %#v", msg["tool_calls"])
|
if len(toolCalls) != 1 {
|
||||||
|
t.Fatalf("expected one tool_call field for embedded example: %#v", msg["tool_calls"])
|
||||||
}
|
}
|
||||||
content, _ := msg["content"].(string)
|
content, _ := msg["content"].(string)
|
||||||
if !strings.Contains(content, "下面是示例:") || !strings.Contains(content, "请勿执行。") || !strings.Contains(content, `"tool_calls"`) {
|
if strings.Contains(content, `"tool_calls"`) {
|
||||||
t.Fatalf("expected embedded example to remain plain text, got %#v", content)
|
t.Fatalf("expected raw tool_calls json stripped from content, got %#v", content)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHandleNonStreamFencedToolCallExampleNotIntercepted(t *testing.T) {
|
func TestHandleNonStreamFencedToolCallExamplePromotesToolCall(t *testing.T) {
|
||||||
h := &Handler{}
|
h := &Handler{}
|
||||||
resp := makeSSEHTTPResponse(
|
resp := makeSSEHTTPResponse(
|
||||||
"data: {\"p\":\"response/content\",\"v\":\"```json\\n{\\\"tool_calls\\\":[{\\\"name\\\":\\\"search\\\",\\\"input\\\":{\\\"q\\\":\\\"go\\\"}}]}\\n```\"}",
|
"data: {\"p\":\"response/content\",\"v\":\"```json\\n{\\\"tool_calls\\\":[{\\\"name\\\":\\\"search\\\",\\\"input\\\":{\\\"q\\\":\\\"go\\\"}}]}\\n```\"}",
|
||||||
@@ -258,16 +259,17 @@ func TestHandleNonStreamFencedToolCallExampleNotIntercepted(t *testing.T) {
|
|||||||
out := decodeJSONBody(t, rec.Body.String())
|
out := decodeJSONBody(t, rec.Body.String())
|
||||||
choices, _ := out["choices"].([]any)
|
choices, _ := out["choices"].([]any)
|
||||||
choice, _ := choices[0].(map[string]any)
|
choice, _ := choices[0].(map[string]any)
|
||||||
if choice["finish_reason"] != "stop" {
|
if choice["finish_reason"] != "tool_calls" {
|
||||||
t.Fatalf("expected finish_reason=stop, got %#v", choice["finish_reason"])
|
t.Fatalf("expected finish_reason=tool_calls, got %#v", choice["finish_reason"])
|
||||||
}
|
}
|
||||||
msg, _ := choice["message"].(map[string]any)
|
msg, _ := choice["message"].(map[string]any)
|
||||||
if _, ok := msg["tool_calls"]; ok {
|
toolCalls, _ := msg["tool_calls"].([]any)
|
||||||
t.Fatalf("did not expect tool_calls field for fenced example: %#v", msg["tool_calls"])
|
if len(toolCalls) != 1 {
|
||||||
|
t.Fatalf("expected one tool_call field for fenced example: %#v", msg["tool_calls"])
|
||||||
}
|
}
|
||||||
content, _ := msg["content"].(string)
|
content, _ := msg["content"].(string)
|
||||||
if !strings.Contains(content, "```json") || !strings.Contains(content, `"tool_calls"`) {
|
if strings.Contains(content, `"tool_calls"`) {
|
||||||
t.Fatalf("expected fenced tool example to pass through as text, got %q", content)
|
t.Fatalf("expected raw tool_calls json stripped from content, got %q", content)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -615,7 +617,7 @@ func TestHandleStreamToolCallWithSameChunkTrailingTextRemainsText(t *testing.T)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHandleStreamFencedToolCallSnippetRemainsText(t *testing.T) {
|
func TestHandleStreamFencedToolCallSnippetPromotesToolCall(t *testing.T) {
|
||||||
h := &Handler{}
|
h := &Handler{}
|
||||||
resp := makeSSEHTTPResponse(
|
resp := makeSSEHTTPResponse(
|
||||||
fmt.Sprintf(`data: {"p":"response/content","v":%q}`, "下面是调用示例:\n```json\n"),
|
fmt.Sprintf(`data: {"p":"response/content","v":%q}`, "下面是调用示例:\n```json\n"),
|
||||||
@@ -631,8 +633,8 @@ func TestHandleStreamFencedToolCallSnippetRemainsText(t *testing.T) {
|
|||||||
if !done {
|
if !done {
|
||||||
t.Fatalf("expected [DONE], body=%s", rec.Body.String())
|
t.Fatalf("expected [DONE], body=%s", rec.Body.String())
|
||||||
}
|
}
|
||||||
if streamHasToolCallsDelta(frames) {
|
if !streamHasToolCallsDelta(frames) {
|
||||||
t.Fatalf("did not expect tool_calls delta for fenced snippet, body=%s", rec.Body.String())
|
t.Fatalf("expected tool_calls delta for fenced snippet, body=%s", rec.Body.String())
|
||||||
}
|
}
|
||||||
content := strings.Builder{}
|
content := strings.Builder{}
|
||||||
for _, frame := range frames {
|
for _, frame := range frames {
|
||||||
@@ -646,11 +648,11 @@ func TestHandleStreamFencedToolCallSnippetRemainsText(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
got := content.String()
|
got := content.String()
|
||||||
if !strings.Contains(got, "```json") || !strings.Contains(strings.ToLower(got), "tool_calls") {
|
if strings.Contains(strings.ToLower(got), "tool_calls") {
|
||||||
t.Fatalf("expected fenced tool snippet in content, got=%q", got)
|
t.Fatalf("expected raw fenced tool_calls snippet stripped from content, got=%q", got)
|
||||||
}
|
}
|
||||||
if streamFinishReason(frames) != "stop" {
|
if streamFinishReason(frames) != "tool_calls" {
|
||||||
t.Fatalf("expected finish_reason=stop, body=%s", rec.Body.String())
|
t.Fatalf("expected finish_reason=tool_calls, body=%s", rec.Body.String())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -297,7 +297,7 @@ func TestHandleResponsesStreamOutputTextDeltaCarriesItemIndexes(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHandleResponsesStreamThinkingAndMixedToolExampleRemainMessageOnly(t *testing.T) {
|
func TestHandleResponsesStreamThinkingAndMixedToolExampleEmitsFunctionCall(t *testing.T) {
|
||||||
h := &Handler{}
|
h := &Handler{}
|
||||||
req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
|
||||||
rec := httptest.NewRecorder()
|
rec := httptest.NewRecorder()
|
||||||
@@ -333,6 +333,7 @@ func TestHandleResponsesStreamThinkingAndMixedToolExampleRemainMessageOnly(t *te
|
|||||||
responseObj, _ := completedPayload["response"].(map[string]any)
|
responseObj, _ := completedPayload["response"].(map[string]any)
|
||||||
output, _ := responseObj["output"].([]any)
|
output, _ := responseObj["output"].([]any)
|
||||||
hasMessage := false
|
hasMessage := false
|
||||||
|
hasFunctionCall := false
|
||||||
for _, item := range output {
|
for _, item := range output {
|
||||||
m, _ := item.(map[string]any)
|
m, _ := item.(map[string]any)
|
||||||
if m == nil {
|
if m == nil {
|
||||||
@@ -342,12 +343,15 @@ func TestHandleResponsesStreamThinkingAndMixedToolExampleRemainMessageOnly(t *te
|
|||||||
hasMessage = true
|
hasMessage = true
|
||||||
}
|
}
|
||||||
if asString(m["type"]) == "function_call" {
|
if asString(m["type"]) == "function_call" {
|
||||||
t.Fatalf("did not expect function_call output for mixed prose tool example, output=%#v", output)
|
hasFunctionCall = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !hasMessage {
|
if !hasMessage {
|
||||||
t.Fatalf("expected message output for mixed prose tool example, output=%#v", output)
|
t.Fatalf("expected message output for mixed prose tool example, output=%#v", output)
|
||||||
}
|
}
|
||||||
|
if !hasFunctionCall {
|
||||||
|
t.Fatalf("expected function_call output for mixed prose tool example, output=%#v", output)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHandleResponsesStreamToolChoiceNoneRejectsFunctionCall(t *testing.T) {
|
func TestHandleResponsesStreamToolChoiceNoneRejectsFunctionCall(t *testing.T) {
|
||||||
|
|||||||
@@ -171,15 +171,15 @@ func TestResponsesNonStreamMixedProseToolPayloadHandlerPath(t *testing.T) {
|
|||||||
t.Fatalf("decode response failed: %v body=%s", err, rec.Body.String())
|
t.Fatalf("decode response failed: %v body=%s", err, rec.Body.String())
|
||||||
}
|
}
|
||||||
outputText, _ := out["output_text"].(string)
|
outputText, _ := out["output_text"].(string)
|
||||||
if outputText == "" {
|
if outputText != "" {
|
||||||
t.Fatalf("expected output_text preserved for mixed prose payload")
|
t.Fatalf("expected output_text hidden for mixed prose tool payload, got %q", outputText)
|
||||||
}
|
}
|
||||||
output, _ := out["output"].([]any)
|
output, _ := out["output"].([]any)
|
||||||
if len(output) != 1 {
|
if len(output) != 1 {
|
||||||
t.Fatalf("expected one output item, got %#v", output)
|
t.Fatalf("expected one output item, got %#v", output)
|
||||||
}
|
}
|
||||||
first, _ := output[0].(map[string]any)
|
first, _ := output[0].(map[string]any)
|
||||||
if first["type"] != "message" {
|
if first["type"] != "function_call" {
|
||||||
t.Fatalf("expected message output item, got %#v", output)
|
t.Fatalf("expected function_call output item, got %#v", output)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -167,22 +167,22 @@ func findToolSegmentStart(s string) int {
|
|||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
lower := strings.ToLower(s)
|
lower := strings.ToLower(s)
|
||||||
offset := 0
|
keywords := []string{"tool_calls", "function.name:", "[tool_call_history]"}
|
||||||
for {
|
bestKeyIdx := -1
|
||||||
keyRel := strings.Index(lower[offset:], "tool_calls")
|
for _, kw := range keywords {
|
||||||
if keyRel < 0 {
|
idx := strings.Index(lower, kw)
|
||||||
return -1
|
if idx >= 0 && (bestKeyIdx < 0 || idx < bestKeyIdx) {
|
||||||
|
bestKeyIdx = idx
|
||||||
}
|
}
|
||||||
keyIdx := offset + keyRel
|
|
||||||
start := strings.LastIndex(s[:keyIdx], "{")
|
|
||||||
if start < 0 {
|
|
||||||
start = keyIdx
|
|
||||||
}
|
|
||||||
if !insideCodeFence(s[:start]) {
|
|
||||||
return start
|
|
||||||
}
|
|
||||||
offset = keyIdx + len("tool_calls")
|
|
||||||
}
|
}
|
||||||
|
if bestKeyIdx < 0 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
start := strings.LastIndex(s[:bestKeyIdx], "{")
|
||||||
|
if start < 0 {
|
||||||
|
start = bestKeyIdx
|
||||||
|
}
|
||||||
|
return start
|
||||||
}
|
}
|
||||||
|
|
||||||
func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix string, calls []util.ParsedToolCall, suffix string, ready bool) {
|
func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix string, calls []util.ParsedToolCall, suffix string, ready bool) {
|
||||||
@@ -191,13 +191,22 @@ func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix
|
|||||||
return "", nil, "", false
|
return "", nil, "", false
|
||||||
}
|
}
|
||||||
lower := strings.ToLower(captured)
|
lower := strings.ToLower(captured)
|
||||||
keyIdx := strings.Index(lower, "tool_calls")
|
|
||||||
|
keyIdx := -1
|
||||||
|
keywords := []string{"tool_calls", "function.name:", "[tool_call_history]"}
|
||||||
|
for _, kw := range keywords {
|
||||||
|
idx := strings.Index(lower, kw)
|
||||||
|
if idx >= 0 && (keyIdx < 0 || idx < keyIdx) {
|
||||||
|
keyIdx = idx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if keyIdx < 0 {
|
if keyIdx < 0 {
|
||||||
return "", nil, "", false
|
return "", nil, "", false
|
||||||
}
|
}
|
||||||
start := strings.LastIndex(captured[:keyIdx], "{")
|
start := strings.LastIndex(captured[:keyIdx], "{")
|
||||||
if start < 0 {
|
if start < 0 {
|
||||||
return "", nil, "", false
|
start = keyIdx
|
||||||
}
|
}
|
||||||
obj, end, ok := extractJSONObjectFrom(captured, start)
|
obj, end, ok := extractJSONObjectFrom(captured, start)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -205,9 +214,6 @@ func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix
|
|||||||
}
|
}
|
||||||
prefixPart := captured[:start]
|
prefixPart := captured[:start]
|
||||||
suffixPart := captured[end:]
|
suffixPart := captured[end:]
|
||||||
if insideCodeFence(state.recentTextTail + prefixPart) {
|
|
||||||
return captured, nil, "", true
|
|
||||||
}
|
|
||||||
parsed := util.ParseStandaloneToolCallsDetailed(obj, toolNames)
|
parsed := util.ParseStandaloneToolCallsDetailed(obj, toolNames)
|
||||||
if len(parsed.Calls) == 0 {
|
if len(parsed.Calls) == 0 {
|
||||||
if parsed.SawToolCallSyntax && parsed.RejectedByPolicy {
|
if parsed.SawToolCallSyntax && parsed.RejectedByPolicy {
|
||||||
@@ -215,6 +221,9 @@ func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix
|
|||||||
// consume it to avoid leaking raw tool_calls JSON to user content.
|
// consume it to avoid leaking raw tool_calls JSON to user content.
|
||||||
return prefixPart, nil, suffixPart, true
|
return prefixPart, nil, suffixPart, true
|
||||||
}
|
}
|
||||||
|
// If it has obvious keywords but failed to parse even after loose repair,
|
||||||
|
// we still might want to intercept it if it looks like an attempt at tool call.
|
||||||
|
// For now, keep the original logic but rely on loose JSON repair.
|
||||||
return captured, nil, "", true
|
return captured, nil, "", true
|
||||||
}
|
}
|
||||||
return prefixPart, parsed.Calls, suffixPart, true
|
return prefixPart, parsed.Calls, suffixPart, true
|
||||||
|
|||||||
@@ -19,9 +19,6 @@ func buildIncrementalToolDeltas(state *toolStreamSieveState) []toolCallDelta {
|
|||||||
if start < 0 {
|
if start < 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if insideCodeFence(state.recentTextTail + captured[:start]) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
certainSingle, hasMultiple := classifyToolCallsIncrementalSafety(captured, keyIdx)
|
certainSingle, hasMultiple := classifyToolCallsIncrementalSafety(captured, keyIdx)
|
||||||
if hasMultiple {
|
if hasMultiple {
|
||||||
state.disableDeltas = true
|
state.disableDeltas = true
|
||||||
|
|||||||
@@ -247,8 +247,18 @@ func (h *Handler) deleteAllSessions(w http.ResponseWriter, r *http.Request) {
|
|||||||
// 删除所有会话
|
// 删除所有会话
|
||||||
err := h.DS.DeleteAllSessionsForToken(r.Context(), token)
|
err := h.DS.DeleteAllSessionsForToken(r.Context(), token)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
writeJSON(w, http.StatusOK, map[string]any{"success": false, "message": "删除失败: " + err.Error()})
|
// token 可能过期,尝试重新登录并重试一次
|
||||||
return
|
newToken, loginErr := h.DS.Login(r.Context(), acc)
|
||||||
|
if loginErr != nil {
|
||||||
|
writeJSON(w, http.StatusOK, map[string]any{"success": false, "message": "删除失败: " + err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
token = newToken
|
||||||
|
_ = h.Store.UpdateAccountToken(acc.Identifier(), token)
|
||||||
|
if retryErr := h.DS.DeleteAllSessionsForToken(r.Context(), token); retryErr != nil {
|
||||||
|
writeJSON(w, http.StatusOK, map[string]any{"success": false, "message": "删除失败: " + retryErr.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
writeJSON(w, http.StatusOK, map[string]any{"success": true, "message": "删除成功"})
|
writeJSON(w, http.StatusOK, map[string]any{"success": true, "message": "删除成功"})
|
||||||
|
|||||||
@@ -1,9 +1,12 @@
|
|||||||
package admin
|
package admin
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -13,10 +16,13 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type testingDSMock struct {
|
type testingDSMock struct {
|
||||||
loginCalls int
|
loginCalls int
|
||||||
createSessionCalls int
|
createSessionCalls int
|
||||||
getPowCalls int
|
getPowCalls int
|
||||||
callCompletionCalls int
|
callCompletionCalls int
|
||||||
|
deleteAllSessionsCalls int
|
||||||
|
deleteAllSessionsError error
|
||||||
|
deleteAllSessionsErrorOnce bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *testingDSMock) Login(_ context.Context, _ config.Account) (string, error) {
|
func (m *testingDSMock) Login(_ context.Context, _ config.Account) (string, error) {
|
||||||
@@ -40,6 +46,14 @@ func (m *testingDSMock) CallCompletion(_ context.Context, _ *auth.RequestAuth, _
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *testingDSMock) DeleteAllSessionsForToken(_ context.Context, _ string) error {
|
func (m *testingDSMock) DeleteAllSessionsForToken(_ context.Context, _ string) error {
|
||||||
|
m.deleteAllSessionsCalls++
|
||||||
|
if m.deleteAllSessionsError != nil {
|
||||||
|
err := m.deleteAllSessionsError
|
||||||
|
if m.deleteAllSessionsErrorOnce {
|
||||||
|
m.deleteAllSessionsError = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -83,3 +97,38 @@ func TestTestAccount_BatchModeOnlyCreatesSession(t *testing.T) {
|
|||||||
t.Fatalf("expected test status ok, got %q", updated.TestStatus)
|
t.Fatalf("expected test status ok, got %q", updated.TestStatus)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDeleteAllSessions_RetryWithReloginOnDeleteFailure(t *testing.T) {
|
||||||
|
t.Setenv("DS2API_CONFIG_JSON", `{"accounts":[{"email":"batch@example.com","password":"pwd","token":"expired-token"}]}`)
|
||||||
|
store := config.LoadStore()
|
||||||
|
ds := &testingDSMock{deleteAllSessionsError: errors.New("token expired"), deleteAllSessionsErrorOnce: true}
|
||||||
|
h := &Handler{Store: store, DS: ds}
|
||||||
|
|
||||||
|
req := httptest.NewRequest(http.MethodPost, "/delete-all", bytes.NewBufferString(`{"identifier":"batch@example.com"}`))
|
||||||
|
rec := httptest.NewRecorder()
|
||||||
|
h.deleteAllSessions(rec, req)
|
||||||
|
|
||||||
|
if rec.Code != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", rec.Code)
|
||||||
|
}
|
||||||
|
var resp map[string]any
|
||||||
|
if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil {
|
||||||
|
t.Fatalf("unmarshal response: %v", err)
|
||||||
|
}
|
||||||
|
if ok, _ := resp["success"].(bool); !ok {
|
||||||
|
t.Fatalf("expected success response, got %#v", resp)
|
||||||
|
}
|
||||||
|
if ds.loginCalls != 1 {
|
||||||
|
t.Fatalf("expected relogin once, got %d", ds.loginCalls)
|
||||||
|
}
|
||||||
|
if ds.deleteAllSessionsCalls != 2 {
|
||||||
|
t.Fatalf("expected delete called twice, got %d", ds.deleteAllSessionsCalls)
|
||||||
|
}
|
||||||
|
updated, ok := store.FindAccount("batch@example.com")
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("expected account")
|
||||||
|
}
|
||||||
|
if updated.Token != "new-token" {
|
||||||
|
t.Fatalf("expected refreshed token persisted, got %q", updated.Token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -62,8 +62,8 @@ func (c *Client) CreateSession(ctx context.Context, a *auth.RequestAuth, maxAtte
|
|||||||
attempts++
|
attempts++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
code := intFrom(resp["code"])
|
code, bizCode, msg, bizMsg := extractResponseStatus(resp)
|
||||||
if status == http.StatusOK && code == 0 {
|
if status == http.StatusOK && code == 0 && bizCode == 0 {
|
||||||
data, _ := resp["data"].(map[string]any)
|
data, _ := resp["data"].(map[string]any)
|
||||||
bizData, _ := data["biz_data"].(map[string]any)
|
bizData, _ := data["biz_data"].(map[string]any)
|
||||||
sessionID, _ := bizData["id"].(string)
|
sessionID, _ := bizData["id"].(string)
|
||||||
@@ -71,10 +71,9 @@ func (c *Client) CreateSession(ctx context.Context, a *auth.RequestAuth, maxAtte
|
|||||||
return sessionID, nil
|
return sessionID, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
msg, _ := resp["msg"].(string)
|
config.Logger.Warn("[create_session] failed", "status", status, "code", code, "biz_code", bizCode, "msg", msg, "biz_msg", bizMsg, "use_config_token", a.UseConfigToken, "account", a.AccountID)
|
||||||
config.Logger.Warn("[create_session] failed", "status", status, "code", code, "msg", msg, "use_config_token", a.UseConfigToken, "account", a.AccountID)
|
|
||||||
if a.UseConfigToken {
|
if a.UseConfigToken {
|
||||||
if isTokenInvalid(status, code, msg) && !refreshed {
|
if isTokenInvalid(status, code, bizCode, msg, bizMsg) && !refreshed {
|
||||||
if c.Auth.RefreshToken(ctx, a) {
|
if c.Auth.RefreshToken(ctx, a) {
|
||||||
refreshed = true
|
refreshed = true
|
||||||
continue
|
continue
|
||||||
@@ -96,6 +95,7 @@ func (c *Client) GetPow(ctx context.Context, a *auth.RequestAuth, maxAttempts in
|
|||||||
maxAttempts = c.maxRetries
|
maxAttempts = c.maxRetries
|
||||||
}
|
}
|
||||||
attempts := 0
|
attempts := 0
|
||||||
|
refreshed := false
|
||||||
for attempts < maxAttempts {
|
for attempts < maxAttempts {
|
||||||
headers := c.authHeaders(a.DeepSeekToken)
|
headers := c.authHeaders(a.DeepSeekToken)
|
||||||
resp, status, err := c.postJSONWithStatus(ctx, c.regular, DeepSeekCreatePowURL, headers, map[string]any{"target_path": "/api/v0/chat/completion"})
|
resp, status, err := c.postJSONWithStatus(ctx, c.regular, DeepSeekCreatePowURL, headers, map[string]any{"target_path": "/api/v0/chat/completion"})
|
||||||
@@ -104,8 +104,8 @@ func (c *Client) GetPow(ctx context.Context, a *auth.RequestAuth, maxAttempts in
|
|||||||
attempts++
|
attempts++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
code := intFrom(resp["code"])
|
code, bizCode, msg, bizMsg := extractResponseStatus(resp)
|
||||||
if status == http.StatusOK && code == 0 {
|
if status == http.StatusOK && code == 0 && bizCode == 0 {
|
||||||
data, _ := resp["data"].(map[string]any)
|
data, _ := resp["data"].(map[string]any)
|
||||||
bizData, _ := data["biz_data"].(map[string]any)
|
bizData, _ := data["biz_data"].(map[string]any)
|
||||||
challenge, _ := bizData["challenge"].(map[string]any)
|
challenge, _ := bizData["challenge"].(map[string]any)
|
||||||
@@ -116,15 +116,16 @@ func (c *Client) GetPow(ctx context.Context, a *auth.RequestAuth, maxAttempts in
|
|||||||
}
|
}
|
||||||
return BuildPowHeader(challenge, answer)
|
return BuildPowHeader(challenge, answer)
|
||||||
}
|
}
|
||||||
msg, _ := resp["msg"].(string)
|
config.Logger.Warn("[get_pow] failed", "status", status, "code", code, "biz_code", bizCode, "msg", msg, "biz_msg", bizMsg, "use_config_token", a.UseConfigToken, "account", a.AccountID)
|
||||||
config.Logger.Warn("[get_pow] failed", "status", status, "code", code, "msg", msg, "use_config_token", a.UseConfigToken, "account", a.AccountID)
|
|
||||||
if a.UseConfigToken {
|
if a.UseConfigToken {
|
||||||
if isTokenInvalid(status, code, msg) {
|
if isTokenInvalid(status, code, bizCode, msg, bizMsg) && !refreshed {
|
||||||
if c.Auth.RefreshToken(ctx, a) {
|
if c.Auth.RefreshToken(ctx, a) {
|
||||||
|
refreshed = true
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if c.Auth.SwitchAccount(ctx, a) {
|
if c.Auth.SwitchAccount(ctx, a) {
|
||||||
|
refreshed = false
|
||||||
attempts++
|
attempts++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -143,15 +144,34 @@ func (c *Client) authHeaders(token string) map[string]string {
|
|||||||
return headers
|
return headers
|
||||||
}
|
}
|
||||||
|
|
||||||
func isTokenInvalid(status int, code int, msg string) bool {
|
func isTokenInvalid(status int, code int, bizCode int, msg string, bizMsg string) bool {
|
||||||
msg = strings.ToLower(msg)
|
msg = strings.ToLower(strings.TrimSpace(msg) + " " + strings.TrimSpace(bizMsg))
|
||||||
if status == http.StatusUnauthorized || status == http.StatusForbidden {
|
if status == http.StatusUnauthorized || status == http.StatusForbidden {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if code == 40001 || code == 40002 || code == 40003 {
|
if code == 40001 || code == 40002 || code == 40003 || bizCode == 40001 || bizCode == 40002 || bizCode == 40003 {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return strings.Contains(msg, "token") || strings.Contains(msg, "unauthorized")
|
return strings.Contains(msg, "token") ||
|
||||||
|
strings.Contains(msg, "unauthorized") ||
|
||||||
|
strings.Contains(msg, "expired") ||
|
||||||
|
strings.Contains(msg, "not login") ||
|
||||||
|
strings.Contains(msg, "login required") ||
|
||||||
|
strings.Contains(msg, "invalid jwt")
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractResponseStatus(resp map[string]any) (code int, bizCode int, msg string, bizMsg string) {
|
||||||
|
code = intFrom(resp["code"])
|
||||||
|
msg, _ = resp["msg"].(string)
|
||||||
|
data, _ := resp["data"].(map[string]any)
|
||||||
|
bizCode = intFrom(data["biz_code"])
|
||||||
|
bizMsg, _ = data["biz_msg"].(string)
|
||||||
|
if strings.TrimSpace(bizMsg) == "" {
|
||||||
|
if bizData, ok := data["biz_data"].(map[string]any); ok {
|
||||||
|
bizMsg, _ = bizData["msg"].(string)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return code, bizCode, msg, bizMsg
|
||||||
}
|
}
|
||||||
|
|
||||||
func normalizeMobileForLogin(raw string) (mobile string, areaCode any) {
|
func normalizeMobileForLogin(raw string) (mobile string, areaCode any) {
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"ds2api/internal/auth"
|
"ds2api/internal/auth"
|
||||||
"ds2api/internal/config"
|
"ds2api/internal/config"
|
||||||
@@ -22,12 +23,12 @@ type SessionInfo struct {
|
|||||||
|
|
||||||
// SessionStats 会话统计结果
|
// SessionStats 会话统计结果
|
||||||
type SessionStats struct {
|
type SessionStats struct {
|
||||||
AccountID string // 账号标识 (email 或 mobile)
|
AccountID string // 账号标识 (email 或 mobile)
|
||||||
FirstPageCount int // 第一页会话数量(当 HasMore 为 true 时,真实总数可能更大)
|
FirstPageCount int // 第一页会话数量(当 HasMore 为 true 时,真实总数可能更大)
|
||||||
PinnedCount int // 置顶会话数量
|
PinnedCount int // 置顶会话数量
|
||||||
HasMore bool // 是否还有更多页
|
HasMore bool // 是否还有更多页
|
||||||
Success bool // 请求是否成功
|
Success bool // 请求是否成功
|
||||||
ErrorMessage string // 错误信息
|
ErrorMessage string // 错误信息
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSessionCount 获取单个账号的会话数量
|
// GetSessionCount 获取单个账号的会话数量
|
||||||
@@ -56,8 +57,8 @@ func (c *Client) GetSessionCount(ctx context.Context, a *auth.RequestAuth, maxAt
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
code := intFrom(resp["code"])
|
code, bizCode, msg, bizMsg := extractResponseStatus(resp)
|
||||||
if status == http.StatusOK && code == 0 {
|
if status == http.StatusOK && code == 0 && bizCode == 0 {
|
||||||
data, _ := resp["data"].(map[string]any)
|
data, _ := resp["data"].(map[string]any)
|
||||||
bizData, _ := data["biz_data"].(map[string]any)
|
bizData, _ := data["biz_data"].(map[string]any)
|
||||||
chatSessions, _ := bizData["chat_sessions"].([]any)
|
chatSessions, _ := bizData["chat_sessions"].([]any)
|
||||||
@@ -79,12 +80,11 @@ func (c *Client) GetSessionCount(ctx context.Context, a *auth.RequestAuth, maxAt
|
|||||||
return stats, nil
|
return stats, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
msg, _ := resp["msg"].(string)
|
|
||||||
stats.ErrorMessage = fmt.Sprintf("status=%d, code=%d, msg=%s", status, code, msg)
|
stats.ErrorMessage = fmt.Sprintf("status=%d, code=%d, msg=%s", status, code, msg)
|
||||||
config.Logger.Warn("[get_session_count] failed", "status", status, "code", code, "msg", msg, "account", a.AccountID)
|
config.Logger.Warn("[get_session_count] failed", "status", status, "code", code, "biz_code", bizCode, "msg", msg, "biz_msg", bizMsg, "account", a.AccountID)
|
||||||
|
|
||||||
if a.UseConfigToken {
|
if a.UseConfigToken {
|
||||||
if isTokenInvalid(status, code, msg) && !refreshed {
|
if isTokenInvalid(status, code, bizCode, msg, bizMsg) && !refreshed {
|
||||||
if c.Auth.RefreshToken(ctx, a) {
|
if c.Auth.RefreshToken(ctx, a) {
|
||||||
refreshed = true
|
refreshed = true
|
||||||
continue
|
continue
|
||||||
@@ -114,9 +114,11 @@ func (c *Client) GetSessionCountForToken(ctx context.Context, token string) (*Se
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
code := intFrom(resp["code"])
|
code, bizCode, msg, bizMsg := extractResponseStatus(resp)
|
||||||
if status != http.StatusOK || code != 0 {
|
if status != http.StatusOK || code != 0 || bizCode != 0 {
|
||||||
msg, _ := resp["msg"].(string)
|
if strings.TrimSpace(bizMsg) != "" {
|
||||||
|
msg = bizMsg
|
||||||
|
}
|
||||||
return nil, fmt.Errorf("request failed: status=%d, code=%d, msg=%s", status, code, msg)
|
return nil, fmt.Errorf("request failed: status=%d, code=%d, msg=%s", status, code, msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -49,18 +49,17 @@ func (c *Client) DeleteSession(ctx context.Context, a *auth.RequestAuth, session
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
code := intFrom(resp["code"])
|
code, bizCode, msg, bizMsg := extractResponseStatus(resp)
|
||||||
if status == http.StatusOK && code == 0 {
|
if status == http.StatusOK && code == 0 && bizCode == 0 {
|
||||||
result.Success = true
|
result.Success = true
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
msg, _ := resp["msg"].(string)
|
|
||||||
result.ErrorMessage = fmt.Sprintf("status=%d, code=%d, msg=%s", status, code, msg)
|
result.ErrorMessage = fmt.Sprintf("status=%d, code=%d, msg=%s", status, code, msg)
|
||||||
config.Logger.Warn("[delete_session] failed", "status", status, "code", code, "msg", msg, "session_id", sessionID)
|
config.Logger.Warn("[delete_session] failed", "status", status, "code", code, "biz_code", bizCode, "msg", msg, "biz_msg", bizMsg, "session_id", sessionID)
|
||||||
|
|
||||||
if a.UseConfigToken {
|
if a.UseConfigToken {
|
||||||
if isTokenInvalid(status, code, msg) && !refreshed {
|
if isTokenInvalid(status, code, bizCode, msg, bizMsg) && !refreshed {
|
||||||
if c.Auth.RefreshToken(ctx, a) {
|
if c.Auth.RefreshToken(ctx, a) {
|
||||||
refreshed = true
|
refreshed = true
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -8,15 +8,15 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func BuildChatCompletion(completionID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
|
func BuildChatCompletion(completionID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
|
||||||
detected := util.ParseStandaloneToolCalls(finalText, toolNames)
|
detected := util.ParseStandaloneToolCallsDetailed(finalText, toolNames)
|
||||||
finishReason := "stop"
|
finishReason := "stop"
|
||||||
messageObj := map[string]any{"role": "assistant", "content": finalText}
|
messageObj := map[string]any{"role": "assistant", "content": finalText}
|
||||||
if strings.TrimSpace(finalThinking) != "" {
|
if strings.TrimSpace(finalThinking) != "" {
|
||||||
messageObj["reasoning_content"] = finalThinking
|
messageObj["reasoning_content"] = finalThinking
|
||||||
}
|
}
|
||||||
if len(detected) > 0 {
|
if len(detected.Calls) > 0 {
|
||||||
finishReason = "tool_calls"
|
finishReason = "tool_calls"
|
||||||
messageObj["tool_calls"] = util.FormatOpenAIToolCalls(detected)
|
messageObj["tool_calls"] = util.FormatOpenAIToolCalls(detected.Calls)
|
||||||
messageObj["content"] = nil
|
messageObj["content"] = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -13,12 +13,12 @@ import (
|
|||||||
func BuildResponseObject(responseID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
|
func BuildResponseObject(responseID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
|
||||||
// Strict mode: only standalone, structured tool-call payloads are treated
|
// Strict mode: only standalone, structured tool-call payloads are treated
|
||||||
// as executable tool calls.
|
// as executable tool calls.
|
||||||
detected := util.ParseStandaloneToolCalls(finalText, toolNames)
|
detected := util.ParseStandaloneToolCallsDetailed(finalText, toolNames)
|
||||||
exposedOutputText := finalText
|
exposedOutputText := finalText
|
||||||
output := make([]any, 0, 2)
|
output := make([]any, 0, 2)
|
||||||
if len(detected) > 0 {
|
if len(detected.Calls) > 0 {
|
||||||
exposedOutputText = ""
|
exposedOutputText = ""
|
||||||
output = append(output, toResponsesFunctionCallItems(detected)...)
|
output = append(output, toResponsesFunctionCallItems(detected.Calls)...)
|
||||||
} else {
|
} else {
|
||||||
content := make([]any, 0, 2)
|
content := make([]any, 0, 2)
|
||||||
if finalThinking != "" {
|
if finalThinking != "" {
|
||||||
|
|||||||
@@ -45,7 +45,7 @@ func TestBuildResponseObjectToolCallsFollowChatShape(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildResponseObjectTreatsMixedProseToolPayloadAsText(t *testing.T) {
|
func TestBuildResponseObjectPromotesMixedProseToolPayloadToFunctionCall(t *testing.T) {
|
||||||
obj := BuildResponseObject(
|
obj := BuildResponseObject(
|
||||||
"resp_test",
|
"resp_test",
|
||||||
"gpt-4o",
|
"gpt-4o",
|
||||||
@@ -56,20 +56,20 @@ func TestBuildResponseObjectTreatsMixedProseToolPayloadAsText(t *testing.T) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
outputText, _ := obj["output_text"].(string)
|
outputText, _ := obj["output_text"].(string)
|
||||||
if outputText == "" {
|
if outputText != "" {
|
||||||
t.Fatalf("expected output_text preserved for mixed prose payload")
|
t.Fatalf("expected output_text hidden for mixed prose tool payload, got %q", outputText)
|
||||||
}
|
}
|
||||||
output, _ := obj["output"].([]any)
|
output, _ := obj["output"].([]any)
|
||||||
if len(output) != 1 {
|
if len(output) != 1 {
|
||||||
t.Fatalf("expected one message output item, got %#v", obj["output"])
|
t.Fatalf("expected one function_call output item, got %#v", obj["output"])
|
||||||
}
|
}
|
||||||
first, _ := output[0].(map[string]any)
|
first, _ := output[0].(map[string]any)
|
||||||
if first["type"] != "message" {
|
if first["type"] != "function_call" {
|
||||||
t.Fatalf("expected message output type, got %#v", first["type"])
|
t.Fatalf("expected function_call output type, got %#v", first["type"])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildResponseObjectFencedToolPayloadRemainsText(t *testing.T) {
|
func TestBuildResponseObjectPromotesFencedToolPayloadToFunctionCall(t *testing.T) {
|
||||||
obj := BuildResponseObject(
|
obj := BuildResponseObject(
|
||||||
"resp_test",
|
"resp_test",
|
||||||
"gpt-4o",
|
"gpt-4o",
|
||||||
@@ -80,16 +80,16 @@ func TestBuildResponseObjectFencedToolPayloadRemainsText(t *testing.T) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
outputText, _ := obj["output_text"].(string)
|
outputText, _ := obj["output_text"].(string)
|
||||||
if outputText == "" {
|
if outputText != "" {
|
||||||
t.Fatalf("expected output_text preserved for fenced example")
|
t.Fatalf("expected output_text hidden for fenced tool payload, got %q", outputText)
|
||||||
}
|
}
|
||||||
output, _ := obj["output"].([]any)
|
output, _ := obj["output"].([]any)
|
||||||
if len(output) != 1 {
|
if len(output) != 1 {
|
||||||
t.Fatalf("expected one message output item, got %#v", obj["output"])
|
t.Fatalf("expected one function_call output item, got %#v", obj["output"])
|
||||||
}
|
}
|
||||||
first, _ := output[0].(map[string]any)
|
first, _ := output[0].(map[string]any)
|
||||||
if first["type"] != "message" {
|
if first["type"] != "function_call" {
|
||||||
t.Fatalf("expected message output type, got %#v", first["type"])
|
t.Fatalf("expected function_call output type, got %#v", first["type"])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,33 +1,22 @@
|
|||||||
'use strict';
|
'use strict';
|
||||||
|
|
||||||
const {
|
const {
|
||||||
extractToolNames,
|
|
||||||
createToolSieveState,
|
createToolSieveState,
|
||||||
processToolSieveChunk,
|
processToolSieveChunk,
|
||||||
flushToolSieve,
|
flushToolSieve,
|
||||||
parseStandaloneToolCalls,
|
parseStandaloneToolCalls,
|
||||||
formatOpenAIStreamToolCalls,
|
formatOpenAIStreamToolCalls,
|
||||||
} = require('../helpers/stream-tool-sieve');
|
} = require('../helpers/stream-tool-sieve');
|
||||||
const {
|
const { BASE_HEADERS } = require('../shared/deepseek-constants');
|
||||||
BASE_HEADERS,
|
const { writeOpenAIError } = require('./error_shape');
|
||||||
} = require('../shared/deepseek-constants');
|
const { parseChunkForContent, isCitation } = require('./sse_parse');
|
||||||
|
const { buildUsage } = require('./token_usage');
|
||||||
const {
|
|
||||||
writeOpenAIError,
|
|
||||||
} = require('./error_shape');
|
|
||||||
const {
|
|
||||||
parseChunkForContent,
|
|
||||||
isCitation,
|
|
||||||
} = require('./sse_parse');
|
|
||||||
const {
|
|
||||||
buildUsage,
|
|
||||||
} = require('./token_usage');
|
|
||||||
const {
|
const {
|
||||||
resolveToolcallPolicy,
|
resolveToolcallPolicy,
|
||||||
|
formatIncrementalToolCallDeltas,
|
||||||
|
filterIncrementalToolCallDeltasByAllowed,
|
||||||
} = require('./toolcall_policy');
|
} = require('./toolcall_policy');
|
||||||
const {
|
const { createChatCompletionEmitter } = require('./stream_emitter');
|
||||||
createChatCompletionEmitter,
|
|
||||||
} = require('./stream_emitter');
|
|
||||||
const {
|
const {
|
||||||
asString,
|
asString,
|
||||||
isAbortError,
|
isAbortError,
|
||||||
@@ -57,6 +46,7 @@ async function handleVercelStream(req, res, rawBody, payload) {
|
|||||||
const searchEnabled = toBool(prep.body.search_enabled);
|
const searchEnabled = toBool(prep.body.search_enabled);
|
||||||
const toolPolicy = resolveToolcallPolicy(prep.body, payload.tools);
|
const toolPolicy = resolveToolcallPolicy(prep.body, payload.tools);
|
||||||
const toolNames = toolPolicy.toolNames;
|
const toolNames = toolPolicy.toolNames;
|
||||||
|
const emitEarlyToolDeltas = toolPolicy.emitEarlyToolDeltas;
|
||||||
|
|
||||||
if (!model || !leaseID || !deepseekToken || !powHeader || !completionPayload) {
|
if (!model || !leaseID || !deepseekToken || !powHeader || !completionPayload) {
|
||||||
writeOpenAIError(res, 500, 'invalid vercel prepare response');
|
writeOpenAIError(res, 500, 'invalid vercel prepare response');
|
||||||
@@ -132,6 +122,7 @@ async function handleVercelStream(req, res, rawBody, payload) {
|
|||||||
const toolSieveState = createToolSieveState();
|
const toolSieveState = createToolSieveState();
|
||||||
let toolCallsEmitted = false;
|
let toolCallsEmitted = false;
|
||||||
const streamToolCallIDs = new Map();
|
const streamToolCallIDs = new Map();
|
||||||
|
const streamToolNames = new Map();
|
||||||
const decoder = new TextDecoder();
|
const decoder = new TextDecoder();
|
||||||
reader = completionRes.body.getReader();
|
reader = completionRes.body.getReader();
|
||||||
let buffered = '';
|
let buffered = '';
|
||||||
@@ -255,6 +246,18 @@ async function handleVercelStream(req, res, rawBody, payload) {
|
|||||||
}
|
}
|
||||||
const events = processToolSieveChunk(toolSieveState, p.text, toolNames);
|
const events = processToolSieveChunk(toolSieveState, p.text, toolNames);
|
||||||
for (const evt of events) {
|
for (const evt of events) {
|
||||||
|
if (evt.type === 'tool_call_deltas') {
|
||||||
|
if (!emitEarlyToolDeltas) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
const filtered = filterIncrementalToolCallDeltasByAllowed(evt.deltas, toolNames, streamToolNames);
|
||||||
|
const formatted = formatIncrementalToolCallDeltas(filtered, streamToolCallIDs);
|
||||||
|
if (formatted.length > 0) {
|
||||||
|
toolCallsEmitted = true;
|
||||||
|
sendDeltaFrame({ tool_calls: formatted });
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
}
|
||||||
if (evt.type === 'tool_calls') {
|
if (evt.type === 'tool_calls') {
|
||||||
toolCallsEmitted = true;
|
toolCallsEmitted = true;
|
||||||
sendDeltaFrame({ tool_calls: formatOpenAIStreamToolCalls(evt.calls, streamToolCallIDs) });
|
sendDeltaFrame({ tool_calls: formatOpenAIStreamToolCalls(evt.calls, streamToolCallIDs) });
|
||||||
|
|||||||
@@ -2,10 +2,8 @@
|
|||||||
|
|
||||||
const {
|
const {
|
||||||
toStringSafe,
|
toStringSafe,
|
||||||
looksLikeToolExampleContext,
|
|
||||||
} = require('./state');
|
} = require('./state');
|
||||||
const {
|
const {
|
||||||
stripFencedCodeBlocks,
|
|
||||||
buildToolCallCandidates,
|
buildToolCallCandidates,
|
||||||
parseToolCallsPayload,
|
parseToolCallsPayload,
|
||||||
parseMarkupToolCalls,
|
parseMarkupToolCalls,
|
||||||
@@ -38,16 +36,13 @@ function parseToolCalls(text, toolNames) {
|
|||||||
|
|
||||||
function parseToolCallsDetailed(text, toolNames) {
|
function parseToolCallsDetailed(text, toolNames) {
|
||||||
const result = emptyParseResult();
|
const result = emptyParseResult();
|
||||||
if (!toStringSafe(text)) {
|
const normalized = toStringSafe(text);
|
||||||
|
if (!normalized) {
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
const sanitized = stripFencedCodeBlocks(text);
|
result.sawToolCallSyntax = looksLikeToolCallSyntax(normalized);
|
||||||
if (!toStringSafe(sanitized)) {
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
result.sawToolCallSyntax = looksLikeToolCallSyntax(sanitized);
|
|
||||||
|
|
||||||
const candidates = buildToolCallCandidates(sanitized);
|
const candidates = buildToolCallCandidates(normalized);
|
||||||
let parsed = [];
|
let parsed = [];
|
||||||
for (const c of candidates) {
|
for (const c of candidates) {
|
||||||
parsed = parseToolCallsPayload(c);
|
parsed = parseToolCallsPayload(c);
|
||||||
@@ -63,9 +58,9 @@ function parseToolCallsDetailed(text, toolNames) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (parsed.length === 0) {
|
if (parsed.length === 0) {
|
||||||
parsed = parseMarkupToolCalls(sanitized);
|
parsed = parseMarkupToolCalls(normalized);
|
||||||
if (parsed.length === 0) {
|
if (parsed.length === 0) {
|
||||||
parsed = parseTextKVToolCalls(sanitized);
|
parsed = parseTextKVToolCalls(normalized);
|
||||||
if (parsed.length === 0) {
|
if (parsed.length === 0) {
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
@@ -90,22 +85,29 @@ function parseStandaloneToolCallsDetailed(text, toolNames) {
|
|||||||
if (!trimmed) {
|
if (!trimmed) {
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
if (trimmed.includes('```')) {
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
if (looksLikeToolExampleContext(trimmed)) {
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
result.sawToolCallSyntax = looksLikeToolCallSyntax(trimmed);
|
result.sawToolCallSyntax = looksLikeToolCallSyntax(trimmed);
|
||||||
let parsed = parseToolCallsPayload(trimmed);
|
const candidates = buildToolCallCandidates(trimmed);
|
||||||
|
let parsed = [];
|
||||||
|
for (const c of candidates) {
|
||||||
|
parsed = parseToolCallsPayload(c);
|
||||||
|
if (parsed.length === 0) {
|
||||||
|
parsed = parseMarkupToolCalls(c);
|
||||||
|
}
|
||||||
|
if (parsed.length === 0) {
|
||||||
|
parsed = parseTextKVToolCalls(c);
|
||||||
|
}
|
||||||
|
if (parsed.length > 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
if (parsed.length === 0) {
|
if (parsed.length === 0) {
|
||||||
parsed = parseMarkupToolCalls(trimmed);
|
parsed = parseMarkupToolCalls(trimmed);
|
||||||
}
|
if (parsed.length === 0) {
|
||||||
if (parsed.length === 0) {
|
parsed = parseTextKVToolCalls(trimmed);
|
||||||
parsed = parseTextKVToolCalls(trimmed);
|
if (parsed.length === 0) {
|
||||||
}
|
return result;
|
||||||
if (parsed.length === 0) {
|
}
|
||||||
return result;
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
result.sawToolCallSyntax = true;
|
result.sawToolCallSyntax = true;
|
||||||
|
|||||||
@@ -46,6 +46,9 @@ function processToolSieveChunk(state, chunk, toolNames) {
|
|||||||
if (Array.isArray(consumed.calls) && consumed.calls.length > 0) {
|
if (Array.isArray(consumed.calls) && consumed.calls.length > 0) {
|
||||||
state.pendingToolRaw = captured;
|
state.pendingToolRaw = captured;
|
||||||
state.pendingToolCalls = consumed.calls;
|
state.pendingToolCalls = consumed.calls;
|
||||||
|
if (consumed.suffix) {
|
||||||
|
state.pending = consumed.suffix + state.pending;
|
||||||
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (consumed.prefix) {
|
if (consumed.prefix) {
|
||||||
@@ -165,19 +168,34 @@ function findToolSegmentStart(s) {
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
const lower = s.toLowerCase();
|
const lower = s.toLowerCase();
|
||||||
|
const keywords = ['tool_calls', 'function.name:', '[tool_call_history]'];
|
||||||
let offset = 0;
|
let offset = 0;
|
||||||
// eslint-disable-next-line no-constant-condition
|
// eslint-disable-next-line no-constant-condition
|
||||||
while (true) {
|
while (true) {
|
||||||
const keyIdx = lower.indexOf('tool_calls', offset);
|
let bestKeyIdx = -1;
|
||||||
if (keyIdx < 0) {
|
let matchedKeyword = '';
|
||||||
|
|
||||||
|
for (const kw of keywords) {
|
||||||
|
const idx = lower.indexOf(kw, offset);
|
||||||
|
if (idx >= 0) {
|
||||||
|
if (bestKeyIdx < 0 || idx < bestKeyIdx) {
|
||||||
|
bestKeyIdx = idx;
|
||||||
|
matchedKeyword = kw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (bestKeyIdx < 0) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const keyIdx = bestKeyIdx;
|
||||||
const start = s.slice(0, keyIdx).lastIndexOf('{');
|
const start = s.slice(0, keyIdx).lastIndexOf('{');
|
||||||
const candidateStart = start >= 0 ? start : keyIdx;
|
const candidateStart = start >= 0 ? start : keyIdx;
|
||||||
if (!insideCodeFence(s.slice(0, candidateStart))) {
|
if (!insideCodeFence(s.slice(0, candidateStart))) {
|
||||||
return candidateStart;
|
return candidateStart;
|
||||||
}
|
}
|
||||||
offset = keyIdx + 'tool_calls'.length;
|
offset = keyIdx + matchedKeyword.length;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -187,20 +205,28 @@ function consumeToolCapture(state, toolNames) {
|
|||||||
return { ready: false, prefix: '', calls: [], suffix: '' };
|
return { ready: false, prefix: '', calls: [], suffix: '' };
|
||||||
}
|
}
|
||||||
const lower = captured.toLowerCase();
|
const lower = captured.toLowerCase();
|
||||||
const keyIdx = lower.indexOf('tool_calls');
|
|
||||||
|
let keyIdx = -1;
|
||||||
|
const keywords = ['tool_calls', 'function.name:', '[tool_call_history]'];
|
||||||
|
for (const kw of keywords) {
|
||||||
|
const idx = lower.indexOf(kw);
|
||||||
|
if (idx >= 0 && (keyIdx < 0 || idx < keyIdx)) {
|
||||||
|
keyIdx = idx;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (keyIdx < 0) {
|
if (keyIdx < 0) {
|
||||||
return { ready: false, prefix: '', calls: [], suffix: '' };
|
return { ready: false, prefix: '', calls: [], suffix: '' };
|
||||||
}
|
}
|
||||||
const start = captured.slice(0, keyIdx).lastIndexOf('{');
|
const start = captured.slice(0, keyIdx).lastIndexOf('{');
|
||||||
if (start < 0) {
|
const actualStart = start >= 0 ? start : keyIdx;
|
||||||
return { ready: false, prefix: '', calls: [], suffix: '' };
|
|
||||||
}
|
const obj = extractJSONObjectFrom(captured, actualStart);
|
||||||
const obj = extractJSONObjectFrom(captured, start);
|
|
||||||
if (!obj.ok) {
|
if (!obj.ok) {
|
||||||
return { ready: false, prefix: '', calls: [], suffix: '' };
|
return { ready: false, prefix: '', calls: [], suffix: '' };
|
||||||
}
|
}
|
||||||
|
|
||||||
const prefixPart = captured.slice(0, start);
|
const prefixPart = captured.slice(0, actualStart);
|
||||||
const suffixPart = captured.slice(obj.end);
|
const suffixPart = captured.slice(obj.end);
|
||||||
|
|
||||||
if (insideCodeFence((state.recentTextTail || '') + prefixPart)) {
|
if (insideCodeFence((state.recentTextTail || '') + prefixPart)) {
|
||||||
@@ -212,16 +238,7 @@ function consumeToolCapture(state, toolNames) {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((state.recentTextTail || '').trim() !== '' || prefixPart.trim() !== '' || suffixPart.trim() !== '') {
|
const parsed = parseStandaloneToolCallsDetailed(captured.slice(actualStart, obj.end), toolNames);
|
||||||
return {
|
|
||||||
ready: true,
|
|
||||||
prefix: captured,
|
|
||||||
calls: [],
|
|
||||||
suffix: '',
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
const parsed = parseStandaloneToolCallsDetailed(captured.slice(start, obj.end), toolNames);
|
|
||||||
if (!Array.isArray(parsed.calls) || parsed.calls.length === 0) {
|
if (!Array.isArray(parsed.calls) || parsed.calls.length === 0) {
|
||||||
if (parsed.sawToolCallSyntax && parsed.rejectedByPolicy) {
|
if (parsed.sawToolCallSyntax && parsed.rejectedByPolicy) {
|
||||||
return {
|
return {
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ func buildToolCallCandidates(text string) []string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// best-effort extraction around "tool_calls" key in mixed text payloads.
|
// best-effort extraction around tool call keywords in mixed text payloads.
|
||||||
candidates = append(candidates, extractToolCallObjects(trimmed)...)
|
candidates = append(candidates, extractToolCallObjects(trimmed)...)
|
||||||
|
|
||||||
// best-effort object slice: from first '{' to last '}'
|
// best-effort object slice: from first '{' to last '}'
|
||||||
@@ -57,25 +57,65 @@ func extractToolCallObjects(text string) []string {
|
|||||||
lower := strings.ToLower(text)
|
lower := strings.ToLower(text)
|
||||||
out := []string{}
|
out := []string{}
|
||||||
offset := 0
|
offset := 0
|
||||||
|
keywords := []string{"tool_calls", "function.name:", "[tool_call_history]"}
|
||||||
for {
|
for {
|
||||||
idx := strings.Index(lower[offset:], "tool_calls")
|
bestIdx := -1
|
||||||
if idx < 0 {
|
matchedKeyword := ""
|
||||||
|
for _, kw := range keywords {
|
||||||
|
idx := strings.Index(lower[offset:], kw)
|
||||||
|
if idx >= 0 {
|
||||||
|
absIdx := offset + idx
|
||||||
|
if bestIdx < 0 || absIdx < bestIdx {
|
||||||
|
bestIdx = absIdx
|
||||||
|
matchedKeyword = kw
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if bestIdx < 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
idx += offset
|
|
||||||
start := strings.LastIndex(text[:idx], "{")
|
idx := bestIdx
|
||||||
for start >= 0 {
|
// Avoid backtracking too far to prevent OOM on malicious or very long strings
|
||||||
|
searchLimit := idx - 2000
|
||||||
|
if searchLimit < offset {
|
||||||
|
searchLimit = offset
|
||||||
|
}
|
||||||
|
|
||||||
|
start := strings.LastIndex(text[searchLimit:idx], "{")
|
||||||
|
if start >= 0 {
|
||||||
|
start += searchLimit
|
||||||
|
}
|
||||||
|
|
||||||
|
if start < 0 {
|
||||||
|
offset = idx + len(matchedKeyword)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
foundObj := false
|
||||||
|
for start >= searchLimit {
|
||||||
candidate, end, ok := extractJSONObject(text, start)
|
candidate, end, ok := extractJSONObject(text, start)
|
||||||
if ok {
|
if ok {
|
||||||
// Move forward to avoid repeatedly matching the same object.
|
// Move forward to avoid repeatedly matching the same object.
|
||||||
offset = end
|
offset = end
|
||||||
out = append(out, strings.TrimSpace(candidate))
|
out = append(out, strings.TrimSpace(candidate))
|
||||||
|
foundObj = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
start = strings.LastIndex(text[:start], "{")
|
// Try previous '{'
|
||||||
|
if start > searchLimit {
|
||||||
|
prevStart := strings.LastIndex(text[searchLimit:start], "{")
|
||||||
|
if prevStart >= 0 {
|
||||||
|
start = searchLimit + prevStart
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
if start < 0 {
|
|
||||||
offset = idx + len("tool_calls")
|
if !foundObj {
|
||||||
|
offset = idx + len(matchedKeyword)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
@@ -88,7 +128,12 @@ func extractJSONObject(text string, start int) (string, int, bool) {
|
|||||||
depth := 0
|
depth := 0
|
||||||
quote := byte(0)
|
quote := byte(0)
|
||||||
escaped := false
|
escaped := false
|
||||||
for i := start; i < len(text); i++ {
|
// Limit scan length to avoid OOM on unclosed objects
|
||||||
|
maxLen := start + 50000
|
||||||
|
if maxLen > len(text) {
|
||||||
|
maxLen = len(text)
|
||||||
|
}
|
||||||
|
for i := start; i < maxLen; i++ {
|
||||||
ch := text[i]
|
ch := text[i]
|
||||||
if quote != 0 {
|
if quote != 0 {
|
||||||
if escaped {
|
if escaped {
|
||||||
|
|||||||
108
internal/util/toolcalls_input_parse.go
Normal file
108
internal/util/toolcalls_input_parse.go
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
func parseToolCallInput(v any) map[string]any {
|
||||||
|
switch x := v.(type) {
|
||||||
|
case nil:
|
||||||
|
return map[string]any{}
|
||||||
|
case map[string]any:
|
||||||
|
return x
|
||||||
|
case string:
|
||||||
|
raw := strings.TrimSpace(x)
|
||||||
|
if raw == "" {
|
||||||
|
return map[string]any{}
|
||||||
|
}
|
||||||
|
var parsed map[string]any
|
||||||
|
if err := json.Unmarshal([]byte(raw), &parsed); err == nil && parsed != nil {
|
||||||
|
repairPathLikeControlChars(parsed)
|
||||||
|
return parsed
|
||||||
|
}
|
||||||
|
// Try to repair invalid backslashes (common in Windows paths output by models)
|
||||||
|
repaired := repairInvalidJSONBackslashes(raw)
|
||||||
|
if repaired != raw {
|
||||||
|
if err := json.Unmarshal([]byte(repaired), &parsed); err == nil && parsed != nil {
|
||||||
|
repairPathLikeControlChars(parsed)
|
||||||
|
return parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Try to repair loose JSON in string argument as well
|
||||||
|
repairedLoose := RepairLooseJSON(raw)
|
||||||
|
if repairedLoose != raw {
|
||||||
|
if err := json.Unmarshal([]byte(repairedLoose), &parsed); err == nil && parsed != nil {
|
||||||
|
repairPathLikeControlChars(parsed)
|
||||||
|
return parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return map[string]any{"_raw": raw}
|
||||||
|
default:
|
||||||
|
b, err := json.Marshal(x)
|
||||||
|
if err != nil {
|
||||||
|
return map[string]any{}
|
||||||
|
}
|
||||||
|
var parsed map[string]any
|
||||||
|
if err := json.Unmarshal(b, &parsed); err == nil && parsed != nil {
|
||||||
|
return parsed
|
||||||
|
}
|
||||||
|
return map[string]any{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func repairPathLikeControlChars(m map[string]any) {
|
||||||
|
for k, v := range m {
|
||||||
|
switch vv := v.(type) {
|
||||||
|
case map[string]any:
|
||||||
|
repairPathLikeControlChars(vv)
|
||||||
|
case []any:
|
||||||
|
for _, item := range vv {
|
||||||
|
if child, ok := item.(map[string]any); ok {
|
||||||
|
repairPathLikeControlChars(child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case string:
|
||||||
|
if isPathLikeKey(k) && containsControlRune(vv) {
|
||||||
|
m[k] = escapeControlRunes(vv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isPathLikeKey(key string) bool {
|
||||||
|
k := strings.ToLower(strings.TrimSpace(key))
|
||||||
|
return strings.Contains(k, "path") || strings.Contains(k, "file")
|
||||||
|
}
|
||||||
|
|
||||||
|
func containsControlRune(s string) bool {
|
||||||
|
for _, r := range s {
|
||||||
|
if unicode.IsControl(r) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func escapeControlRunes(s string) string {
|
||||||
|
var b strings.Builder
|
||||||
|
b.Grow(len(s) + 8)
|
||||||
|
for _, r := range s {
|
||||||
|
switch r {
|
||||||
|
case '\b':
|
||||||
|
b.WriteString(`\b`)
|
||||||
|
case '\f':
|
||||||
|
b.WriteString(`\f`)
|
||||||
|
case '\n':
|
||||||
|
b.WriteString(`\n`)
|
||||||
|
case '\r':
|
||||||
|
b.WriteString(`\r`)
|
||||||
|
case '\t':
|
||||||
|
b.WriteString(`\t`)
|
||||||
|
default:
|
||||||
|
b.WriteRune(r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
79
internal/util/toolcalls_json_repair.go
Normal file
79
internal/util/toolcalls_json_repair.go
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func repairInvalidJSONBackslashes(s string) string {
|
||||||
|
if !strings.Contains(s, "\\") {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
var out strings.Builder
|
||||||
|
out.Grow(len(s) + 10)
|
||||||
|
runes := []rune(s)
|
||||||
|
for i := 0; i < len(runes); i++ {
|
||||||
|
if runes[i] == '\\' {
|
||||||
|
if i+1 < len(runes) {
|
||||||
|
next := runes[i+1]
|
||||||
|
switch next {
|
||||||
|
case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
|
||||||
|
out.WriteRune('\\')
|
||||||
|
out.WriteRune(next)
|
||||||
|
i++
|
||||||
|
continue
|
||||||
|
case 'u':
|
||||||
|
if i+5 < len(runes) {
|
||||||
|
isHex := true
|
||||||
|
for j := 1; j <= 4; j++ {
|
||||||
|
r := runes[i+1+j]
|
||||||
|
if !((r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')) {
|
||||||
|
isHex = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if isHex {
|
||||||
|
out.WriteRune('\\')
|
||||||
|
out.WriteRune('u')
|
||||||
|
for j := 1; j <= 4; j++ {
|
||||||
|
out.WriteRune(runes[i+1+j])
|
||||||
|
}
|
||||||
|
i += 5
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Not a valid escape sequence, double it
|
||||||
|
out.WriteString("\\\\")
|
||||||
|
} else {
|
||||||
|
out.WriteRune(runes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
var unquotedKeyPattern = regexp.MustCompile(`([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:`)
|
||||||
|
|
||||||
|
// missingArrayBracketsPattern identifies a sequence of two or more JSON objects separated by commas
|
||||||
|
// that immediately follow a colon, which indicates a missing array bracket `[` `]`.
|
||||||
|
// E.g., "key": {"a": 1}, {"b": 2} -> "key": [{"a": 1}, {"b": 2}]
|
||||||
|
// NOTE: The pattern uses (?:[^{}]|\{[^{}]*\})* to support single-level nested {} objects,
|
||||||
|
// which handles cases like {"content": "x", "input": {"q": "y"}}
|
||||||
|
var missingArrayBracketsPattern = regexp.MustCompile(`(:\s*)(\{(?:[^{}]|\{[^{}]*\})*\}(?:\s*,\s*\{(?:[^{}]|\{[^{}]*\})*\})+)`)
|
||||||
|
|
||||||
|
func RepairLooseJSON(s string) string {
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
if s == "" {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
// 1. Replace unquoted keys: {key: -> {"key":
|
||||||
|
s = unquotedKeyPattern.ReplaceAllString(s, `$1"$2":`)
|
||||||
|
|
||||||
|
// 2. Heuristic: Fix missing array brackets for list of objects
|
||||||
|
// e.g., : {obj1}, {obj2} -> : [{obj1}, {obj2}]
|
||||||
|
// This specifically addresses DeepSeek's "list hallucination"
|
||||||
|
s = missingArrayBracketsPattern.ReplaceAllString(s, `$1[$2]`)
|
||||||
|
|
||||||
|
return s
|
||||||
|
}
|
||||||
@@ -16,7 +16,6 @@ type ToolCallParseResult struct {
|
|||||||
RejectedByPolicy bool
|
RejectedByPolicy bool
|
||||||
RejectedToolNames []string
|
RejectedToolNames []string
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseToolCalls(text string, availableToolNames []string) []ParsedToolCall {
|
func ParseToolCalls(text string, availableToolNames []string) []ParsedToolCall {
|
||||||
return ParseToolCallsDetailed(text, availableToolNames).Calls
|
return ParseToolCallsDetailed(text, availableToolNames).Calls
|
||||||
}
|
}
|
||||||
@@ -26,10 +25,6 @@ func ParseToolCallsDetailed(text string, availableToolNames []string) ToolCallPa
|
|||||||
if strings.TrimSpace(text) == "" {
|
if strings.TrimSpace(text) == "" {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
text = stripFencedCodeBlocks(text)
|
|
||||||
if strings.TrimSpace(text) == "" {
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
result.SawToolCallSyntax = looksLikeToolCallSyntax(text)
|
result.SawToolCallSyntax = looksLikeToolCallSyntax(text)
|
||||||
|
|
||||||
candidates := buildToolCallCandidates(text)
|
candidates := buildToolCallCandidates(text)
|
||||||
@@ -68,7 +63,6 @@ func ParseToolCallsDetailed(text string, availableToolNames []string) ToolCallPa
|
|||||||
result.RejectedByPolicy = len(rejectedNames) > 0 && len(calls) == 0
|
result.RejectedByPolicy = len(rejectedNames) > 0 && len(calls) == 0
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseStandaloneToolCalls(text string, availableToolNames []string) []ParsedToolCall {
|
func ParseStandaloneToolCalls(text string, availableToolNames []string) []ParsedToolCall {
|
||||||
return ParseStandaloneToolCallsDetailed(text, availableToolNames).Calls
|
return ParseStandaloneToolCallsDetailed(text, availableToolNames).Calls
|
||||||
}
|
}
|
||||||
@@ -79,17 +73,15 @@ func ParseStandaloneToolCallsDetailed(text string, availableToolNames []string)
|
|||||||
if trimmed == "" {
|
if trimmed == "" {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
if looksLikeToolExampleContext(trimmed) {
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
result.SawToolCallSyntax = looksLikeToolCallSyntax(trimmed)
|
result.SawToolCallSyntax = looksLikeToolCallSyntax(trimmed)
|
||||||
candidates := []string{trimmed}
|
candidates := buildToolCallCandidates(trimmed)
|
||||||
|
var parsed []ParsedToolCall
|
||||||
for _, candidate := range candidates {
|
for _, candidate := range candidates {
|
||||||
candidate = strings.TrimSpace(candidate)
|
candidate = strings.TrimSpace(candidate)
|
||||||
if candidate == "" {
|
if candidate == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
parsed := parseToolCallsPayload(candidate)
|
parsed = parseToolCallsPayload(candidate)
|
||||||
if len(parsed) == 0 {
|
if len(parsed) == 0 {
|
||||||
parsed = parseXMLToolCalls(candidate)
|
parsed = parseXMLToolCalls(candidate)
|
||||||
}
|
}
|
||||||
@@ -100,14 +92,23 @@ func ParseStandaloneToolCallsDetailed(text string, availableToolNames []string)
|
|||||||
parsed = parseTextKVToolCalls(candidate)
|
parsed = parseTextKVToolCalls(candidate)
|
||||||
}
|
}
|
||||||
if len(parsed) > 0 {
|
if len(parsed) > 0 {
|
||||||
result.SawToolCallSyntax = true
|
break
|
||||||
calls, rejectedNames := filterToolCallsDetailed(parsed, availableToolNames)
|
|
||||||
result.Calls = calls
|
|
||||||
result.RejectedToolNames = rejectedNames
|
|
||||||
result.RejectedByPolicy = len(rejectedNames) > 0 && len(calls) == 0
|
|
||||||
return result
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if len(parsed) == 0 {
|
||||||
|
parsed = parseXMLToolCalls(trimmed)
|
||||||
|
if len(parsed) == 0 {
|
||||||
|
parsed = parseTextKVToolCalls(trimmed)
|
||||||
|
if len(parsed) == 0 {
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result.SawToolCallSyntax = true
|
||||||
|
calls, rejectedNames := filterToolCallsDetailed(parsed, availableToolNames)
|
||||||
|
result.Calls = calls
|
||||||
|
result.RejectedToolNames = rejectedNames
|
||||||
|
result.RejectedByPolicy = len(rejectedNames) > 0 && len(calls) == 0
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -171,7 +172,13 @@ func resolveAllowedToolName(name string, allowed map[string]struct{}, allowedCan
|
|||||||
func parseToolCallsPayload(payload string) []ParsedToolCall {
|
func parseToolCallsPayload(payload string) []ParsedToolCall {
|
||||||
var decoded any
|
var decoded any
|
||||||
if err := json.Unmarshal([]byte(payload), &decoded); err != nil {
|
if err := json.Unmarshal([]byte(payload), &decoded); err != nil {
|
||||||
return nil
|
// Try to repair backslashes first! Because LLMs often mix these two problems.
|
||||||
|
repaired := repairInvalidJSONBackslashes(payload)
|
||||||
|
// Try loose repair on top of that
|
||||||
|
repaired = RepairLooseJSON(repaired)
|
||||||
|
if err := json.Unmarshal([]byte(repaired), &decoded); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
switch v := decoded.(type) {
|
switch v := decoded.(type) {
|
||||||
case map[string]any:
|
case map[string]any:
|
||||||
@@ -248,32 +255,3 @@ func parseToolCallItem(m map[string]any) (ParsedToolCall, bool) {
|
|||||||
Input: parseToolCallInput(inputRaw),
|
Input: parseToolCallInput(inputRaw),
|
||||||
}, true
|
}, true
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseToolCallInput normalizes a tool-call "input" value of arbitrary type
// into a map. nil yields an empty map, maps are returned as-is, strings are
// JSON-decoded (falling back to {"_raw": s} when they are not a JSON object),
// and any other value is round-tripped through JSON, defaulting to an empty
// map when it does not decode into an object.
func parseToolCallInput(v any) map[string]any {
	switch val := v.(type) {
	case nil:
		return map[string]any{}
	case map[string]any:
		return val
	case string:
		trimmed := strings.TrimSpace(val)
		if trimmed == "" {
			return map[string]any{}
		}
		var decoded map[string]any
		if json.Unmarshal([]byte(trimmed), &decoded) == nil && decoded != nil {
			return decoded
		}
		// Not a JSON object: preserve the raw text so callers can inspect it.
		return map[string]any{"_raw": trimmed}
	default:
		encoded, err := json.Marshal(val)
		if err != nil {
			return map[string]any{}
		}
		var decoded map[string]any
		if json.Unmarshal(encoded, &decoded) == nil && decoded != nil {
			return decoded
		}
		return map[string]any{}
	}
}
|
|
||||||
|
|||||||
@@ -1,6 +1,9 @@
|
|||||||
package util
|
package util
|
||||||
|
|
||||||
import "testing"
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
func TestParseToolCalls(t *testing.T) {
|
func TestParseToolCalls(t *testing.T) {
|
||||||
text := `prefix {"tool_calls":[{"name":"search","input":{"q":"golang"}}]} suffix`
|
text := `prefix {"tool_calls":[{"name":"search","input":{"q":"golang"}}]} suffix`
|
||||||
@@ -19,8 +22,8 @@ func TestParseToolCalls(t *testing.T) {
|
|||||||
func TestParseToolCallsFromFencedJSON(t *testing.T) {
|
func TestParseToolCallsFromFencedJSON(t *testing.T) {
|
||||||
text := "I will call tools now\n```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"news\"}}]}\n```"
|
text := "I will call tools now\n```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"news\"}}]}\n```"
|
||||||
calls := ParseToolCalls(text, []string{"search"})
|
calls := ParseToolCalls(text, []string{"search"})
|
||||||
if len(calls) != 0 {
|
if len(calls) != 1 {
|
||||||
t.Fatalf("expected fenced tool_call example to be ignored, got %#v", calls)
|
t.Fatalf("expected fenced tool_call payload to be parsed, got %#v", calls)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,10 +99,10 @@ func TestFormatOpenAIToolCalls(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseStandaloneToolCallsOnlyMatchesStandalonePayload(t *testing.T) {
|
func TestParseStandaloneToolCallsSupportsMixedProsePayload(t *testing.T) {
|
||||||
mixed := `这里是示例:{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
|
mixed := `这里是示例:{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
|
||||||
if calls := ParseStandaloneToolCalls(mixed, []string{"search"}); len(calls) != 0 {
|
if calls := ParseStandaloneToolCalls(mixed, []string{"search"}); len(calls) != 1 {
|
||||||
t.Fatalf("expected standalone parser to ignore mixed prose, got %#v", calls)
|
t.Fatalf("expected standalone parser to parse mixed prose payload, got %#v", calls)
|
||||||
}
|
}
|
||||||
|
|
||||||
standalone := `{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
|
standalone := `{"tool_calls":[{"name":"search","input":{"q":"go"}}]}`
|
||||||
@@ -109,10 +112,10 @@ func TestParseStandaloneToolCallsOnlyMatchesStandalonePayload(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseStandaloneToolCallsIgnoresFencedCodeBlock(t *testing.T) {
|
func TestParseStandaloneToolCallsParsesFencedCodeBlock(t *testing.T) {
|
||||||
fenced := "```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}\n```"
|
fenced := "```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}\n```"
|
||||||
if calls := ParseStandaloneToolCalls(fenced, []string{"search"}); len(calls) != 0 {
|
if calls := ParseStandaloneToolCalls(fenced, []string{"search"}); len(calls) != 1 {
|
||||||
t.Fatalf("expected fenced tool_call example to be ignored, got %#v", calls)
|
t.Fatalf("expected fenced tool_call payload to be parsed, got %#v", calls)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -279,3 +282,238 @@ func TestParseToolCallsDoesNotAcceptMismatchedMarkupTags(t *testing.T) {
|
|||||||
t.Fatalf("expected mismatched tags to be rejected, got %#v", calls)
|
t.Fatalf("expected mismatched tags to be rejected, got %#v", calls)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestRepairInvalidJSONBackslashes verifies that illegal backslash escapes
// (e.g. Windows path separators) are doubled while legal JSON escapes and
// already-escaped sequences pass through untouched.
func TestRepairInvalidJSONBackslashes(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		// `\U` is invalid and doubled; the trailing `\n` is a legal escape and kept.
		{`{"path": "C:\Users\name"}`, `{"path": "C:\\Users\name"}`},
		{`{"cmd": "cd D:\git_codes"}`, `{"cmd": "cd D:\\git_codes"}`},
		// Legal escapes are preserved verbatim.
		{`{"text": "line1\nline2"}`, `{"text": "line1\nline2"}`},
		{`{"path": "D:\\back\\slash"}`, `{"path": "D:\\back\\slash"}`},
		// \u with exactly four hex digits is legal; with fewer it is doubled.
		{`{"unicode": "\u2705"}`, `{"unicode": "\u2705"}`},
		{`{"invalid_u": "\u123"}`, `{"invalid_u": "\\u123"}`},
	}

	for _, tt := range tests {
		got := repairInvalidJSONBackslashes(tt.input)
		if got != tt.expected {
			t.Errorf("repairInvalidJSONBackslashes(%s) = %s; want %s", tt.input, got, tt.expected)
		}
	}
}
|
||||||
|
|
||||||
|
// TestRepairLooseJSON verifies that unquoted object keys are quoted so the
// payload becomes strict, parseable JSON.
func TestRepairLooseJSON(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		{`{tool_calls: [{"name": "search", "input": {"q": "go"}}]}`, `{"tool_calls": [{"name": "search", "input": {"q": "go"}}]}`},
		{`{name: "search", input: {q: "go"}}`, `{"name": "search", "input": {"q": "go"}}`},
	}

	for _, tt := range tests {
		got := RepairLooseJSON(tt.input)
		if got != tt.expected {
			t.Errorf("RepairLooseJSON(%s) = %s; want %s", tt.input, got, tt.expected)
		}
	}
}
|
||||||
|
|
||||||
|
// TestParseToolCallsWithUnquotedKeys ensures a tool-call payload embedded in
// surrounding prose still parses when the outer key is missing its quotes.
func TestParseToolCallsWithUnquotedKeys(t *testing.T) {
	text := `这里是列表:{tool_calls: [{"name": "todowrite", "input": {"todos": "test"}}]}`
	availableTools := []string{"todowrite"}

	parsed := ParseToolCalls(text, availableTools)
	if len(parsed) != 1 {
		t.Fatalf("expected 1 tool call, got %d", len(parsed))
	}
	if parsed[0].Name != "todowrite" {
		t.Errorf("expected tool todowrite, got %s", parsed[0].Name)
	}
}
|
||||||
|
|
||||||
|
func TestParseToolCallsWithInvalidBackslashes(t *testing.T) {
|
||||||
|
// DeepSeek sometimes outputs Windows paths with single backslashes in JSON strings
|
||||||
|
// Note: using raw string to simulate what AI actually sends in the stream
|
||||||
|
text := `好的,执行以下命令:{"name": "execute_command", "input": "{\"command\": \"cd D:\git_codes && dir\"}"}`
|
||||||
|
availableTools := []string{"execute_command"}
|
||||||
|
|
||||||
|
parsed := ParseToolCalls(text, availableTools)
|
||||||
|
// If standard JSON fails, buildToolCallCandidates should still extract the object,
|
||||||
|
// and parseToolCallsPayload should repair it.
|
||||||
|
if len(parsed) != 1 {
|
||||||
|
// If it still fails, let's see why
|
||||||
|
candidates := buildToolCallCandidates(text)
|
||||||
|
t.Logf("Candidates: %v", candidates)
|
||||||
|
t.Fatalf("expected 1 tool call, got %d", len(parsed))
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd, ok := parsed[0].Input["command"].(string)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected command string in input, got %v", parsed[0].Input)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := "cd D:\\git_codes && dir"
|
||||||
|
if cmd != expected {
|
||||||
|
t.Errorf("expected command %q, got %q", expected, cmd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseToolCallsWithDeepSeekHallucination(t *testing.T) {
|
||||||
|
// 模拟 DeepSeek 典型的幻觉输出:未加引号的键名 + 包含 Windows 路径的嵌套 JSON 字符串 + 漏掉列表的方括号
|
||||||
|
text := `检测到实施意图——实现经典算法。需在misc/目录创建Python文件。
|
||||||
|
关键约束:
|
||||||
|
1. Windows UTF-8编码处理
|
||||||
|
2. 必须用绝对路径导入
|
||||||
|
3. 禁止write覆盖已有文件(misc/目录允许创建新文件)
|
||||||
|
将任务分解并委托:
|
||||||
|
- 研究8皇后算法模式(并行探索)
|
||||||
|
- 实现带可视化输出的解决方案(unspecified-high)
|
||||||
|
先创建todo列表追踪步骤。
|
||||||
|
{tool_calls: [{"name": "todowrite", "input": {"todos": {"content": "研究8皇后问题算法模式(回溯法)和输出格式", "status": "pending", "priority": "high"}, {"content": "在misc/目录创建8皇后Python脚本,包含完整解决方案和可视化输出", "status": "pending", "priority": "high"}, {"content": "验证脚本正确性(运行测试)", "status": "pending", "priority": "medium"}}}]}`
|
||||||
|
|
||||||
|
availableTools := []string{"todowrite"}
|
||||||
|
parsed := ParseToolCalls(text, availableTools)
|
||||||
|
|
||||||
|
if len(parsed) != 1 {
|
||||||
|
cands := buildToolCallCandidates(text)
|
||||||
|
for i, c := range cands {
|
||||||
|
t.Logf("CAND %d: %s", i, c)
|
||||||
|
repaired := RepairLooseJSON(c)
|
||||||
|
t.Logf(" REPAIRED: %s", repaired)
|
||||||
|
}
|
||||||
|
t.Fatalf("expected 1 tool call, got %d. Candidates: %v", len(parsed), buildToolCallCandidates(text))
|
||||||
|
}
|
||||||
|
|
||||||
|
if parsed[0].Name != "todowrite" {
|
||||||
|
t.Errorf("expected tool name 'todowrite', got %q", parsed[0].Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
todos, ok := parsed[0].Input["todos"].([]any)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected 'todos' to be parsed as a list, got %T: %#v", parsed[0].Input["todos"], parsed[0].Input["todos"])
|
||||||
|
}
|
||||||
|
if len(todos) != 3 {
|
||||||
|
t.Errorf("expected 3 todo items, got %d", len(todos))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseToolCallsWithMixedWindowsPaths(t *testing.T) {
|
||||||
|
// 更复杂的案例:嵌套 JSON 字符串中的反斜杠未转义
|
||||||
|
text := `关键约束: 1. Windows UTF-8编码处理 2. 必须用绝对路径导入 D:\git_codes\ds2api\misc
|
||||||
|
{tool_calls: [{"name": "write_file", "input": "{\"path\": \"D:\\git_codes\\ds2api\\misc\\queens.py\", \"content\": \"print('hello')\"}"}]}`
|
||||||
|
|
||||||
|
availableTools := []string{"write_file"}
|
||||||
|
parsed := ParseToolCalls(text, availableTools)
|
||||||
|
|
||||||
|
if len(parsed) != 1 {
|
||||||
|
t.Fatalf("expected 1 tool call from mixed text with paths, got %d", len(parsed))
|
||||||
|
}
|
||||||
|
|
||||||
|
path, _ := parsed[0].Input["path"].(string)
|
||||||
|
// 在解析后的 Go map 中,反斜杠应该被还原
|
||||||
|
if !strings.Contains(path, "D:\\git_codes") && !strings.Contains(path, "D:/git_codes") {
|
||||||
|
t.Errorf("expected path to contain Windows style separators, got %q", path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestParseToolCallInputRepairsControlCharsInPath checks that backslashes in a
// Windows-style "path" field survive as literal separators after repair, while
// escape sequences in other fields keep their normal decoded meaning.
func TestParseToolCallInputRepairsControlCharsInPath(t *testing.T) {
	// Raw string: the payload carries literal `\t` / `\n` sequences, as a
	// model would emit them inside an unescaped Windows path.
	in := `{"path":"D:\tmp\new\readme.txt","content":"line1\nline2"}`
	parsed := parseToolCallInput(in)

	path, ok := parsed["path"].(string)
	if !ok {
		t.Fatalf("expected path string in parsed input, got %#v", parsed["path"])
	}
	if path != `D:\tmp\new\readme.txt` {
		t.Fatalf("expected repaired windows path, got %q", path)
	}

	content, ok := parsed["content"].(string)
	if !ok {
		t.Fatalf("expected content string in parsed input, got %#v", parsed["content"])
	}
	if content != "line1\nline2" {
		t.Fatalf("expected non-path field to keep decoded escapes, got %q", content)
	}
}
|
||||||
|
|
||||||
|
func TestRepairLooseJSONWithNestedObjects(t *testing.T) {
|
||||||
|
// 测试嵌套对象的修复:DeepSeek 幻觉输出,每个元素内部包含嵌套 {}
|
||||||
|
// 注意:正则只支持单层嵌套,不支持更深层次的嵌套
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
// 1. 单层嵌套对象(核心修复目标)
|
||||||
|
{
|
||||||
|
name: "单层嵌套 - 2个元素",
|
||||||
|
input: `"todos": {"content": "研究算法", "input": {"q": "8 queens"}}, {"content": "实现", "input": {"path": "queens.py"}}`,
|
||||||
|
expected: `"todos": [{"content": "研究算法", "input": {"q": "8 queens"}}, {"content": "实现", "input": {"path": "queens.py"}}]`,
|
||||||
|
},
|
||||||
|
// 2. 3个单层嵌套对象
|
||||||
|
{
|
||||||
|
name: "3个单层嵌套对象",
|
||||||
|
input: `"items": {"a": {"x":1}}, {"b": {"y":2}}, {"c": {"z":3}}`,
|
||||||
|
expected: `"items": [{"a": {"x":1}}, {"b": {"y":2}}, {"c": {"z":3}}]`,
|
||||||
|
},
|
||||||
|
// 3. 混合嵌套:有些字段是对象,有些是原始值
|
||||||
|
{
|
||||||
|
name: "混合嵌套 - 对象和原始值混合",
|
||||||
|
input: `"items": {"name": "test", "config": {"timeout": 30}}, {"name": "test2", "config": {"timeout": 60}}`,
|
||||||
|
expected: `"items": [{"name": "test", "config": {"timeout": 30}}, {"name": "test2", "config": {"timeout": 60}}]`,
|
||||||
|
},
|
||||||
|
// 4. 4个嵌套对象(边界测试)
|
||||||
|
{
|
||||||
|
name: "4个嵌套对象",
|
||||||
|
input: `"todos": {"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}`,
|
||||||
|
expected: `"todos": [{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}]`,
|
||||||
|
},
|
||||||
|
// 5. DeepSeek 典型幻觉:无空格逗号分隔
|
||||||
|
{
|
||||||
|
name: "无空格逗号分隔",
|
||||||
|
input: `"results": {"name": "a"}, {"name": "b"}, {"name": "c"}`,
|
||||||
|
expected: `"results": [{"name": "a"}, {"name": "b"}, {"name": "c"}]`,
|
||||||
|
},
|
||||||
|
// 6. 嵌套数组(数组在对象内,不是深层嵌套)
|
||||||
|
{
|
||||||
|
name: "对象内包含数组",
|
||||||
|
input: `"data": {"items": [1,2,3]}, {"items": [4,5,6]}`,
|
||||||
|
expected: `"data": [{"items": [1,2,3]}, {"items": [4,5,6]}]`,
|
||||||
|
},
|
||||||
|
// 7. 真实的 DeepSeek 8皇后问题输出
|
||||||
|
{
|
||||||
|
name: "DeepSeek 8皇后真实输出",
|
||||||
|
input: `"todos": {"content": "研究8皇后算法", "status": "pending"}, {"content": "实现Python脚本", "status": "pending"}, {"content": "验证结果", "status": "pending"}`,
|
||||||
|
expected: `"todos": [{"content": "研究8皇后算法", "status": "pending"}, {"content": "实现Python脚本", "status": "pending"}, {"content": "验证结果", "status": "pending"}]`,
|
||||||
|
},
|
||||||
|
// 8. 简单无嵌套对象(回归测试)
|
||||||
|
{
|
||||||
|
name: "简单无嵌套对象",
|
||||||
|
input: `"items": {"a": 1}, {"b": 2}`,
|
||||||
|
expected: `"items": [{"a": 1}, {"b": 2}]`,
|
||||||
|
},
|
||||||
|
// 9. 更复杂的单层嵌套
|
||||||
|
{
|
||||||
|
name: "复杂单层嵌套",
|
||||||
|
input: `"functions": {"name": "execute", "input": {"command": "ls"}}, {"name": "read", "input": {"file": "a.txt"}}`,
|
||||||
|
expected: `"functions": [{"name": "execute", "input": {"command": "ls"}}, {"name": "read", "input": {"file": "a.txt"}}]`,
|
||||||
|
},
|
||||||
|
// 10. 5个嵌套对象
|
||||||
|
{
|
||||||
|
name: "5个嵌套对象",
|
||||||
|
input: `"tasks": {"id":1}, {"id":2}, {"id":3}, {"id":4}, {"id":5}`,
|
||||||
|
expected: `"tasks": [{"id":1}, {"id":2}, {"id":3}, {"id":4}, {"id":5}]`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := RepairLooseJSON(tt.input)
|
||||||
|
if got != tt.expected {
|
||||||
|
t.Errorf("[%s] RepairLooseJSON with nested objects:\n input: %s\n got: %s\n expected: %s", tt.name, tt.input, got, tt.expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -409,8 +409,8 @@ func TestParseToolCallsWithFunctionWrapper(t *testing.T) {
|
|||||||
func TestParseStandaloneToolCallsFencedCodeBlock(t *testing.T) {
|
func TestParseStandaloneToolCallsFencedCodeBlock(t *testing.T) {
|
||||||
fenced := "Here's an example:\n```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}\n```\nDon't execute this."
|
fenced := "Here's an example:\n```json\n{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}\n```\nDon't execute this."
|
||||||
calls := ParseStandaloneToolCalls(fenced, []string{"search"})
|
calls := ParseStandaloneToolCalls(fenced, []string{"search"})
|
||||||
if len(calls) != 0 {
|
if len(calls) != 1 {
|
||||||
t.Fatalf("expected fenced code block ignored, got %d calls", len(calls))
|
t.Fatalf("expected fenced code block to be parsed, got %d calls", len(calls))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,101 +0,0 @@
|
|||||||
# DeepSeek Function Calling 缺陷分析与 ds2api 的增强修复策略
|
|
||||||
|
|
||||||
> **相关 PR**: #74 (代码核心实现) 与 #75 (Merge to dev)
|
|
||||||
> **问题背景**: 解决因包括 DeepSeek 在内的部分模型在函数调用(Function Calling/Tool Call)表现不够“规范”,从而导致工具调用失败的问题。
|
|
||||||
|
|
||||||
## 一、底层架构对比:为什么会产生 Function Calling 缺陷?
|
|
||||||
|
|
||||||
在探讨缺陷前,我们需要理解两种 Function Calling 的底层结构差异:
|
|
||||||
|
|
||||||
### 1. OpenAI 的原生结构化返回 (API 级分离)
|
|
||||||
在 OpenAI 的规范中,**聊天文字与工具调用是在底层的 JSON 结构中被硬性拆分的**:
|
|
||||||
* 聊天废话存放在 `response.choices[0].message.content` 里。
|
|
||||||
* 工具请求存放在单独的数组 `response.choices[0].message.tool_calls` 里。
|
|
||||||
|
|
||||||
**优势:** 这种设计对客户端极其友好。客户端只需判断 `tool_calls` 是否为空,就能决定是执行代码还是渲染文字。它支持同时并发多个工具请求,且底层的生成引擎被严格训练和约束,极少抛出语法错误的 JSON。
|
|
||||||
|
|
||||||
### 2. DeepSeek 等模型的“单文本流”机制
|
|
||||||
相比之下,部分未经深度专门微调的模型(或者在特定的通信适配层中),它们依然倾向于把一切内容打包成一个纯文本流吐出。这就是为什么它们的输出往往不仅包含了本该属于 `tool_calls` 结构里的 JSON,还会像个“老实人”一样夹杂了属于 `content` 里的散文。
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 二、DeepSeek 在 Function Calling 上的特定缺陷表现
|
|
||||||
|
|
||||||
相比于 OpenAI 严格遵循 API 约定的原生结构,DeepSeek 等开源/国产推理模型在工具调用时,经常会暴露出以下三种典型的“不守规矩”的输出行为:
|
|
||||||
|
|
||||||
### 1. 混合输出:散文文本与工具 JSON 混杂 (Mixed Prose Streams)
|
|
||||||
当应用要求模型直接返回工具请求时,DeepSeek 有时候会**“忍不住想和用户搭话”**。
|
|
||||||
它常常前置一段解释性废话,中间插入工具调用的 JSON 参数,并在末尾再补上一句总结:
|
|
||||||
```text
|
|
||||||
好的,我这就帮你读取 README.md 的内容:
|
|
||||||
{"tool_calls":[{"name":"read_file","input":{"path":"README.md"}}]}
|
|
||||||
请稍等片刻,我马上把它读出来。
|
|
||||||
```
|
|
||||||
**旧版系统痛点:**
|
|
||||||
原有的代码存在**严格模式(Strict Mode)**校验:
|
|
||||||
```go
|
|
||||||
// 如果解析到的 JSON 块前后存在任何非空字符串,就放弃当作工具调用!
|
|
||||||
if strings.TrimSpace(state.recentTextTail) != "" || strings.TrimSpace(prefixPart) != "" ... {
|
|
||||||
return captured, nil, "", true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
这直接导致上述结构被网关认定是一段“普通聊天”,直接原封不动地返回给用户,这直接干挂了后续的工具自动执行流程。
|
|
||||||
|
|
||||||
### 2. 工具名格式幻觉:擅自修改或前缀化工具名称
|
|
||||||
由于 DeepSeek 的预训练数据中有大量的代码和不同的平台结构,它在回复工具名称时,常常无法忠实于 System Prompt 中提供的纯命名(也就是 `name: "read_file"`),而是加上前缀或者拼写变形,例如:
|
|
||||||
* `{"name": "mcp.search_web"}` (自带命名空间)
|
|
||||||
* `{"name": "tools.read_file"}`
|
|
||||||
* `{"name": "search-web"}` (下划线变成了中划线)
|
|
||||||
|
|
||||||
**旧版系统痛点:**
|
|
||||||
旧版系统对于工具名的匹配几乎只有“绝对相等”的字典级比对,只要差了一个字符或加了前缀,就会由于找不到合法工具而直接失败。
|
|
||||||
|
|
||||||
### 3. Role 角色的非标准返回
|
|
||||||
在部分工具通信流的响应中,返回的内容其所属的 `role` 没有被标准化处理,可能携带意料之外的属性,或是与下游严格比对出现冲突。
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 三、PR #74 的代码增强修复方案
|
|
||||||
|
|
||||||
为了解决大模型这种自身的不规范行为,PR #74 在系统的中间层网关联入了一个**极其包容的容错引擎**。它并不强制要求模型“改过自新”,而是主动做了以下三块增强:
|
|
||||||
|
|
||||||
### 1. 从流中分离混合内容(废除 Strict Mode)
|
|
||||||
修改了 `internal/adapter/openai/tool_sieve_core.go`。
|
|
||||||
取消了前后包裹文本的拦截逻辑。当系统扫描到流式结构中有完整的 `{"tool_calls":...}` 时,它会将废话和 JSON 分发到不同的事件流中:
|
|
||||||
```go
|
|
||||||
if prefix != "" {
|
|
||||||
// 将前面的“好的,帮你读文件”剥离出来作为常规文本输出
|
|
||||||
state.noteText(prefix)
|
|
||||||
events = append(events, toolStreamEvent{Content: prefix})
|
|
||||||
}
|
|
||||||
// 捕获并拦截中间的工具请求,进行背后执行
|
|
||||||
state.pendingToolCalls = calls
|
|
||||||
```
|
|
||||||
**效果:** 用户的屏幕上只能看到正常的文字交流,而后端的工具也会立刻挂载。
|
|
||||||
|
|
||||||
### 2. 多级宽容匹配引擎 (Resolve Allowed Tool Name)
|
|
||||||
在 `internal/util/toolcalls_parse.go` 中,新增了一个由严到松降级匹配的强大漏斗策略函数 `resolveAllowedToolName`:
|
|
||||||
|
|
||||||
1. **绝对匹配**:和以前一样,`read_file` == `read_file`。
|
|
||||||
2. **忽略大小写**:`Read_File` 算作合法。
|
|
||||||
3. **命名空间抹除**:通过寻找最后一个 `.` 来剥离前缀,强制将 `mcp.search_web` 还原出真实的 `search_web`。
|
|
||||||
4. **终极正则清洗**:
|
|
||||||
引入 `var toolNameLoosePattern = regexp.MustCompile(`[^a-z0-9]+`)`。
|
|
||||||
这个正则剥离了字符串里所有的符号、空格、格式符。
|
|
||||||
将传入的 `read-file` 洗除符号成为 `readfile`,并去和系统中所有合法工具同样清洗后的版本进行比较。只要核心字母一致,即算作匹配成功。
|
|
||||||
|
|
||||||
### 3. Role 归一化 (Normalize OpenAIRoleForPrompt)
|
|
||||||
在 `internal/adapter/openai/responses_input_items.go` 等处,引入了特定的 `normalizeOpenAIRoleForPrompt(role)` 清洗,保证输入和传递给上游的 Role 枚举始终受控,消除了因为意外的身份字段传参崩溃。
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 报告总结与 tool_sieve 的本质作用
|
|
||||||
|
|
||||||
PR #74 / #75 并没有从模型本身开刀,而是基于**网关应足够健壮**的设计哲学。
|
|
||||||
|
|
||||||
**其实整个增强实现,本质上实现了一个名为 `tool_sieve` (工具筛子) 的中间层网关。**
|
|
||||||
面对 DeepSeek 这种吐出一团混合了聊天文字与 JSON 面团的“不标准”数据流,`tool_sieve` 就像一个勤劳的高精度筛子,把面团揉开并逐一分拣:
|
|
||||||
1. 它把散文分拣出来,塞回标准结构的 `content` 字段去展示;
|
|
||||||
2. 剥离并清洗出有瑕疵的 JSON 块,按照 OpenAI 的标准格式小心翼翼地放进 `tool_calls` 结构里去等待执行。
|
|
||||||
|
|
||||||
这意味着,即便 AI 被配置了奇怪的回复设定、加粗了强调语言,甚至是犯了标点符号拼写小失误,**只要它输出了可以拼凑成工具指令的 JSON 核心单元,整个中继层就能将其挽救,并把正确的工具结果呈现给模型和用户**。 这不仅修复了缺陷,更极大地增强了工具网关的通用性和鲁棒性。
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
# DS2API Refactor Baseline (Historical Snapshot)
|
|
||||||
|
|
||||||
- Snapshot time: `2026-02-22T08:53:54Z`
|
|
||||||
- Snapshot branch: `dev`
|
|
||||||
- Snapshot HEAD: `5d3989a`
|
|
||||||
- Scope: backend + node api + webui large-file decoupling (no behavior change)
|
|
||||||
|
|
||||||
## Gate Commands
|
|
||||||
|
|
||||||
1. `./tests/scripts/run-unit-all.sh`
|
|
||||||
- Result: PASS
|
|
||||||
- Includes:
|
|
||||||
- `go test ./...`
|
|
||||||
- `node --test api/helpers/stream-tool-sieve.test.js api/chat-stream.test.js api/compat/js_compat_test.js`
|
|
||||||
2. `npm --prefix webui run build`
|
|
||||||
- Result: PASS
|
|
||||||
3. `./tests/scripts/check-refactor-line-gate.sh`
|
|
||||||
- Result: PASS (`checked=131 missing=0 over_limit=0`)
|
|
||||||
4. Stage gates (1-5) replay:
|
|
||||||
- `go test ./internal/config ./internal/admin ./internal/account ./internal/deepseek ./internal/format/openai` -> PASS
|
|
||||||
- `go test ./internal/adapter/openai ./internal/util ./internal/sse ./internal/compat` -> PASS
|
|
||||||
- `go test ./internal/adapter/claude ./internal/adapter/gemini ./internal/config` -> PASS
|
|
||||||
- `go test ./internal/testsuite ./cmd/ds2api-tests` -> PASS
|
|
||||||
- `node --test api/helpers/stream-tool-sieve.test.js api/chat-stream.test.js api/compat/js_compat_test.js` -> PASS
|
|
||||||
5. Final full regression:
|
|
||||||
- `go test ./... -count=1` -> PASS
|
|
||||||
|
|
||||||
## Notes
|
|
||||||
|
|
||||||
- This file records a historical baseline for refactor process tracking.
|
|
||||||
- It is not intended to represent the current repository HEAD.
|
|
||||||
- Frontend manual smoke for phase 6 still requires human execution and sign-off.
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
# Refactor Line Gate
|
|
||||||
|
|
||||||
## Rules
|
|
||||||
|
|
||||||
1. Backend production files upper bound: `<= 300` lines.
|
|
||||||
2. Frontend (`webui/`) production files upper bound: `<= 500` lines.
|
|
||||||
3. Entry/facade files upper bound: `<= 120` lines.
|
|
||||||
4. Scope is limited to target files in `plans/refactor-line-gate-targets.txt`.
|
|
||||||
5. Test files are out of scope for this gate.
|
|
||||||
|
|
||||||
## Command
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./tests/scripts/check-refactor-line-gate.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
## Naming Note
|
|
||||||
|
|
||||||
- Original split plan used `internal/admin/handler_accounts_test.go` for account probing logic.
|
|
||||||
- In Go, `*_test.go` files are test-only compilation units and cannot host production handlers.
|
|
||||||
- The production file is implemented as `internal/admin/handler_accounts_testing.go`.
|
|
||||||
|
|
||||||
@@ -1,6 +1,13 @@
|
|||||||
{
|
{
|
||||||
"calls": [],
|
"calls": [
|
||||||
"sawToolCallSyntax": false,
|
{
|
||||||
|
"name": "read_file",
|
||||||
|
"input": {
|
||||||
|
"path": "README.MD"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"sawToolCallSyntax": true,
|
||||||
"rejectedByPolicy": false,
|
"rejectedByPolicy": false,
|
||||||
"rejectedToolNames": []
|
"rejectedToolNames": []
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,13 @@
|
|||||||
{
|
{
|
||||||
"calls": [],
|
"calls": [
|
||||||
"sawToolCallSyntax": false,
|
{
|
||||||
|
"name": "read_file",
|
||||||
|
"input": {
|
||||||
|
"path": "README.MD"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"sawToolCallSyntax": true,
|
||||||
"rejectedByPolicy": false,
|
"rejectedByPolicy": false,
|
||||||
"rejectedToolNames": []
|
"rejectedToolNames": []
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,13 @@
|
|||||||
{
|
{
|
||||||
"calls": [],
|
"calls": [
|
||||||
|
{
|
||||||
|
"name": "read_file",
|
||||||
|
"input": {
|
||||||
|
"path": "README.MD"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
"sawToolCallSyntax": true,
|
"sawToolCallSyntax": true,
|
||||||
"rejectedByPolicy": false,
|
"rejectedByPolicy": false,
|
||||||
"rejectedToolNames": []
|
"rejectedToolNames": []
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -91,7 +91,9 @@ test('parseToolCalls supports fenced json and function.arguments string payload'
|
|||||||
'```',
|
'```',
|
||||||
].join('\n');
|
].join('\n');
|
||||||
const calls = parseToolCalls(text, ['read_file']);
|
const calls = parseToolCalls(text, ['read_file']);
|
||||||
assert.equal(calls.length, 0);
|
assert.equal(calls.length, 1);
|
||||||
|
assert.equal(calls[0].name, 'read_file');
|
||||||
|
assert.equal(calls[0].input.path, 'README.md');
|
||||||
});
|
});
|
||||||
|
|
||||||
test('parseToolCalls parses text-kv fallback payload', () => {
|
test('parseToolCalls parses text-kv fallback payload', () => {
|
||||||
@@ -122,19 +124,19 @@ test('parseToolCalls parses multiple text-kv fallback payloads', () => {
|
|||||||
assert.equal(calls[1].name, 'bash');
|
assert.equal(calls[1].name, 'bash');
|
||||||
});
|
});
|
||||||
|
|
||||||
test('parseStandaloneToolCalls only matches standalone payload and ignores mixed prose', () => {
|
test('parseStandaloneToolCalls parses mixed prose payload', () => {
|
||||||
const mixed = '这里是示例:{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]},请勿执行。';
|
const mixed = '这里是示例:{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]},请勿执行。';
|
||||||
const standalone = '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}';
|
const standalone = '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}';
|
||||||
const mixedCalls = parseStandaloneToolCalls(mixed, ['read_file']);
|
const mixedCalls = parseStandaloneToolCalls(mixed, ['read_file']);
|
||||||
const standaloneCalls = parseStandaloneToolCalls(standalone, ['read_file']);
|
const standaloneCalls = parseStandaloneToolCalls(standalone, ['read_file']);
|
||||||
assert.equal(mixedCalls.length, 0);
|
assert.equal(mixedCalls.length, 1);
|
||||||
assert.equal(standaloneCalls.length, 1);
|
assert.equal(standaloneCalls.length, 1);
|
||||||
});
|
});
|
||||||
|
|
||||||
test('parseStandaloneToolCalls ignores fenced code block tool_call examples', () => {
|
test('parseStandaloneToolCalls parses fenced code block tool_call payload', () => {
|
||||||
const fenced = ['```json', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}', '```'].join('\n');
|
const fenced = ['```json', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}', '```'].join('\n');
|
||||||
const calls = parseStandaloneToolCalls(fenced, ['read_file']);
|
const calls = parseStandaloneToolCalls(fenced, ['read_file']);
|
||||||
assert.equal(calls.length, 0);
|
assert.equal(calls.length, 1);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
||||||
@@ -259,28 +261,28 @@ test('sieve emits final tool_calls for split arguments payload without increment
|
|||||||
assert.deepEqual(finalCalls[0].input, { path: 'README.MD', mode: 'head' });
|
assert.deepEqual(finalCalls[0].input, { path: 'README.MD', mode: 'head' });
|
||||||
});
|
});
|
||||||
|
|
||||||
test('sieve keeps tool json as text when leading prose exists (strict mode)', () => {
|
test('sieve still emits tool_calls when leading prose exists before tool json', () => {
|
||||||
const events = runSieve(
|
const events = runSieve(
|
||||||
['我将调用工具。', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}'],
|
['我将调用工具。', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}'],
|
||||||
['read_file'],
|
['read_file'],
|
||||||
);
|
);
|
||||||
const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0));
|
const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0));
|
||||||
const leakedText = collectText(events);
|
const leakedText = collectText(events);
|
||||||
assert.equal(hasTool, false);
|
assert.equal(hasTool, true);
|
||||||
assert.equal(leakedText.includes('我将调用工具。'), true);
|
assert.equal(leakedText.includes('我将调用工具。'), true);
|
||||||
assert.equal(leakedText.toLowerCase().includes('tool_calls'), true);
|
assert.equal(leakedText.toLowerCase().includes('tool_calls'), false);
|
||||||
});
|
});
|
||||||
|
|
||||||
test('sieve keeps same-chunk trailing prose payload as text in strict mode', () => {
|
test('sieve emits tool_calls and keeps trailing prose when payload and prose share a chunk', () => {
|
||||||
const events = runSieve(
|
const events = runSieve(
|
||||||
['{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}然后继续解释。'],
|
['{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}然后继续解释。'],
|
||||||
['read_file'],
|
['read_file'],
|
||||||
);
|
);
|
||||||
const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0));
|
const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0));
|
||||||
const leakedText = collectText(events);
|
const leakedText = collectText(events);
|
||||||
assert.equal(hasTool, false);
|
assert.equal(hasTool, true);
|
||||||
assert.equal(leakedText.includes('然后继续解释。'), true);
|
assert.equal(leakedText.includes('然后继续解释。'), true);
|
||||||
assert.equal(leakedText.toLowerCase().includes('tool_calls'), true);
|
assert.equal(leakedText.toLowerCase().includes('tool_calls'), false);
|
||||||
});
|
});
|
||||||
|
|
||||||
test('formatOpenAIStreamToolCalls reuses ids with the same idStore', () => {
|
test('formatOpenAIStreamToolCalls reuses ids with the same idStore', () => {
|
||||||
|
|||||||
77
tests/repair_json_tool.go
Normal file
77
tests/repair_json_tool.go
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// repairInvalidJSONBackslashes rewrites a JSON-ish string so that every
// backslash which does not begin a legal JSON escape sequence is doubled,
// while legal escapes (\" \\ \/ \b \f \n \r \t and \uXXXX with exactly four
// hex digits) pass through untouched. Inputs without a backslash are
// returned unchanged.
func repairInvalidJSONBackslashes(s string) string {
	if !strings.Contains(s, "\\") {
		return s
	}
	isHexDigit := func(r rune) bool {
		return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
	}
	src := []rune(s)
	var b strings.Builder
	b.Grow(len(s) + 10)
	for i := 0; i < len(src); {
		if src[i] != '\\' {
			b.WriteRune(src[i])
			i++
			continue
		}
		if i+1 < len(src) {
			switch next := src[i+1]; next {
			case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
				// Simple two-character escape: keep verbatim.
				b.WriteRune('\\')
				b.WriteRune(next)
				i += 2
				continue
			case 'u':
				// \uXXXX is only legal with four hex digits following.
				if i+5 < len(src) &&
					isHexDigit(src[i+2]) && isHexDigit(src[i+3]) &&
					isHexDigit(src[i+4]) && isHexDigit(src[i+5]) {
					b.WriteString(string(src[i : i+6]))
					i += 6
					continue
				}
			}
		}
		// Lone or invalid backslash: double it so json.Unmarshal accepts it.
		b.WriteString(`\\`)
		i++
	}
	return b.String()
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{`{"path": "C:\Users\name"}`, `{"path": "C:\\Users\\name"}`},
|
||||||
|
{`{"cmd": "cd D:\git_codes"}`, `{"cmd": "cd D:\\git_codes"}`},
|
||||||
|
{`{"text": "line1\nline2"}`, `{"text": "line1\nline2"}`},
|
||||||
|
{`{"path": "D:\\back\\slash"}`, `{"path": "D:\\back\\slash"}`},
|
||||||
|
{`{"unicode": "\u2705"}`, `{"unicode": "\u2705"}`},
|
||||||
|
{`{"invalid_u": "\u123"}`, `{"invalid_u": "\\u123"}`},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := repairInvalidJSONBackslashes(tt.input)
|
||||||
|
if got != tt.expected {
|
||||||
|
fmt.Printf("FAIL: input=%s\n got=%s\n exp=%s\n", tt.input, got, tt.expected)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("PASS: input=%s\n", tt.input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user