Compare commits

..

34 Commits

Author SHA1 Message Date
CJACK.
1c749b6803 Merge pull request #73 from CJackHwang/dev
Merge pull request #72 from CJackHwang/codex/review-changes-to-test-account-logic

Normalize mobile login numbers, skip completion flow for session-only account tests, and add tests
2026-03-03 00:07:57 +08:00
CJACK.
c329bf26b6 Merge pull request #72 from CJackHwang/codex/review-changes-to-test-account-logic
Normalize mobile login numbers, skip completion flow for session-only account tests, and add tests
2026-03-02 23:56:27 +08:00
CJACK.
3ae5b57ebe fix(deepseek): normalize mobile before login token refresh 2026-03-02 23:48:54 +08:00
CJACK.
0bf5d5440c Merge pull request #69 from CJackHwang/dev
js对齐
2026-03-01 07:22:42 +08:00
CJACK
d731a1fd4f 门禁 2026-03-01 07:20:24 +08:00
CJACK
93e9fb531d js对齐 2026-03-01 07:15:35 +08:00
CJACK.
6daeb2553d Merge pull request #68 from CJackHwang/dev
修复严重问题
2026-03-01 06:53:23 +08:00
CJACK
321b8a89ee 优化 2026-03-01 06:42:07 +08:00
CJACK
d84875e466 工具调用优化 2026-03-01 06:33:49 +08:00
CJACK
ea8c9a28a9 更新readme和icon 2026-03-01 06:22:41 +08:00
CJACK
a302fb3c25 修复 2026-03-01 05:55:46 +08:00
CJACK.
958bd124cc Merge pull request #64 from CJackHwang/dev
修复已知问题
2026-02-28 18:58:46 +08:00
CJACK.
b89e154e43 Merge pull request #63 from CJackHwang/codex/fix-issues-in-image-analysis
Use repository root Dockerfile, make Go cross-build robust, and fix process wait logic
2026-02-28 18:51:57 +08:00
CJACK.
01924f4a69 fix(docker): auto-detect target arch for local ARM builds 2026-02-28 18:39:33 +08:00
CJACK.
3725694bdf Merge pull request #61 from ronghuaxueleng/main
feat(webui): 账号列表添加搜索过滤功能
2026-02-28 18:16:41 +08:00
root
21b12f583a fix(admin): 账号测试始终发送默认消息以验证完整链路
测试接口不再仅验证会话创建,改为始终发送「你是谁?」
走完整 completion 路径,确保被封禁账号能被正确识别为失败。
2026-02-28 10:18:26 +08:00
root
d97b86e0ee feat(webui): 账号列表添加搜索过滤功能
- 后端 GET /admin/accounts 支持 ?q= 参数,大小写不敏感匹配 identifier/email/mobile
- 前端搜索框内嵌于标题栏按钮行(测试全部按钮前)
- 搜索时重置到第 1 页,分页 total 反映过滤后数量
- 无匹配结果时显示专属提示文案(中英文)
2026-02-28 09:57:19 +08:00
qiangcao
0869ea56cd Merge branch 'CJackHwang:main' into main 2026-02-28 09:18:20 +08:00
CJACK.
4768440627 Merge pull request #60 from CJackHwang/main
同步
2026-02-27 23:18:44 +08:00
root
37b867c7ad Merge branch 'docker' 2026-02-27 20:59:16 +08:00
root
25ea28a277 feat: 账号测试状态持久化、分页选择器、点击账号名复制
- Account 结构加 TestStatus 字段,测试后写入 config.json
- listAccounts 接口返回 test_status,前端根据结果显示红/绿/黄状态点
- 分页选择器支持 10/20/50/100/500/1000/2000/5000
- 点击账号名自动复制到剪贴板,hover 显示复制图标,复制后显示绿色对勾
2026-02-27 20:58:18 +08:00
root
0ac49ab32b merge: 合并 main 分支到 docker,保留 docker-compose.yml 和 start.mjs 2026-02-27 20:21:20 +08:00
root
70c59eb71d chore: 将 .claude/ 和 CLAUDE.local.md 从 git 跟踪中排除 2026-02-27 20:19:00 +08:00
root
962700f525 chore: 删除无用文件,清理 .gitignore Python 残留规则 2026-02-18 21:06:02 +08:00
root
e143d13ff6 feat: 编译和安装依赖使用国内镜像 2026-02-18 20:57:23 +08:00
root
2f853d7364 feat: 重写 start.mjs 适配 Go 运行时 2026-02-18 20:53:10 +08:00
root
36099a4ada chore: 删除 Python 残留文件(项目已迁移至 Go) 2026-02-18 20:50:07 +08:00
root
73bdb55cee merge: 合并 main 分支到 docker,保留 docker-compose.yml 和分页接口 2026-02-18 20:38:53 +08:00
root
3f3198c959 feat: 账号管理界面优化
- 账号列表支持分页(每页10条,倒序显示)
- API 密钥列表支持展开/关闭
2026-02-07 13:40:14 +08:00
root
6b8f7f8821 feat: 启动脚本显示所有环境变量 2026-02-07 10:55:34 +08:00
root
ac9a1ae742 merge: 合并 main 分支到 docker 2026-02-07 10:28:18 +08:00
root
bd4c2bacbc merge: 合并 main 分支到 docker 2026-02-02 20:31:42 +08:00
root
6cfc7051c4 Merge remote-tracking branch 'origin/main' into docker 2026-02-02 20:29:11 +08:00
root
22a2a97a76 feat: 添加 Docker 和 GitHub Actions 支持
- 添加 docker/Dockerfile 多阶段构建(前端+后端)
- 添加 docker-compose.yml 支持阿里云镜像部署
- 添加 .github/workflows/release.yml 自动发布到阿里云
- 添加 .dockerignore 优化构建
- 添加 VERSION 版本管理文件
- 添加 start.mjs 本地开发启动脚本
2026-02-02 20:23:33 +08:00
75 changed files with 3206 additions and 1854 deletions

View File

@@ -1,20 +1,20 @@
#### 💻 变更类型 | Change Type #### 💻 变更类型 | Change Type
<!-- For change type, change [ ] to [x]. --> <!-- For change type, change [ ] to [x]. -->
- [ ] ✨ feat - [ ] ✨ feat
- [ ] 🐛 fix - [ ] 🐛 fix
- [ ] ♻️ refactor - [ ] ♻️ refactor
- [ ] 💄 style - [ ] 💄 style
- [ ] 👷 build - [ ] 👷 build
- [ ] ⚡️ perf - [ ] ⚡️ perf
- [ ] 📝 docs - [ ] 📝 docs
- [ ] 🔨 chore - [ ] 🔨 chore
#### 🔀 变更说明 | Description of Change #### 🔀 变更说明 | Description of Change
<!-- Thank you for your Pull Request. Please provide a description above. -->
#### 📝 补充信息 | Additional Information #### 📝 补充信息 | Additional Information
<!-- Add any other context about the Pull Request here. -->

127
.github/workflows/release-dockerhub.yml vendored Normal file
View File

@@ -0,0 +1,127 @@
name: Release to Docker Hub
on:
workflow_dispatch:
inputs:
version_type:
description: '版本类型'
required: true
default: 'patch'
type: choice
options:
- patch
- minor
- major
permissions:
contents: write
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v5
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Get current version
id: get_version
run: |
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
TAG_VERSION=${LATEST_TAG#v}
if [ -f VERSION ]; then
FILE_VERSION=$(cat VERSION | tr -d '[:space:]')
else
FILE_VERSION="0.0.0"
fi
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
if version_gt "$FILE_VERSION" "$TAG_VERSION"; then
VERSION="$FILE_VERSION"
else
VERSION="$TAG_VERSION"
fi
echo "Current version: $VERSION"
echo "current_version=$VERSION" >> $GITHUB_OUTPUT
- name: Calculate next version
id: next_version
env:
VERSION_TYPE: ${{ github.event.inputs.version_type }}
run: |
VERSION="${{ steps.get_version.outputs.current_version }}"
BASE_VERSION=$(echo "$VERSION" | sed 's/-.*$//')
IFS='.' read -r -a version_parts <<< "$BASE_VERSION"
MAJOR="${version_parts[0]:-0}"
MINOR="${version_parts[1]:-0}"
PATCH="${version_parts[2]:-0}"
case "$VERSION_TYPE" in
major)
NEW_VERSION="$((MAJOR + 1)).0.0"
;;
minor)
NEW_VERSION="${MAJOR}.$((MINOR + 1)).0"
;;
*)
NEW_VERSION="${MAJOR}.${MINOR}.$((PATCH + 1))"
;;
esac
echo "New version: $NEW_VERSION"
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "new_tag=v$NEW_VERSION" >> $GITHUB_OUTPUT
- name: Update VERSION file
run: |
echo "${{ steps.next_version.outputs.new_version }}" > VERSION
- name: Commit VERSION and create tag
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add VERSION
if ! git diff --cached --quiet; then
git commit -m "chore: bump version to ${{ steps.next_version.outputs.new_tag }} [skip ci]"
fi
NEW_TAG="${{ steps.next_version.outputs.new_tag }}"
git tag -a "$NEW_TAG" -m "Release $NEW_TAG"
git push origin HEAD:main "$NEW_TAG"
# Docker 构建并推送到 Docker Hub
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push Docker image
uses: docker/build-push-action@v6
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/ds2api:${{ steps.next_version.outputs.new_tag }}
${{ secrets.DOCKERHUB_USERNAME }}/ds2api:${{ steps.next_version.outputs.new_version }}
${{ secrets.DOCKERHUB_USERNAME }}/ds2api:latest
labels: |
org.opencontainers.image.version=${{ steps.next_version.outputs.new_version }}
org.opencontainers.image.revision=${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=max

128
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,128 @@
name: Release to Aliyun CR
on:
workflow_dispatch:
inputs:
version_type:
description: '版本类型'
required: true
default: 'patch'
type: choice
options:
- patch
- minor
- major
permissions:
contents: write
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v5
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Get current version
id: get_version
run: |
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
TAG_VERSION=${LATEST_TAG#v}
if [ -f VERSION ]; then
FILE_VERSION=$(cat VERSION | tr -d '[:space:]')
else
FILE_VERSION="0.0.0"
fi
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
if version_gt "$FILE_VERSION" "$TAG_VERSION"; then
VERSION="$FILE_VERSION"
else
VERSION="$TAG_VERSION"
fi
echo "Current version: $VERSION"
echo "current_version=$VERSION" >> $GITHUB_OUTPUT
- name: Calculate next version
id: next_version
env:
VERSION_TYPE: ${{ github.event.inputs.version_type }}
run: |
VERSION="${{ steps.get_version.outputs.current_version }}"
BASE_VERSION=$(echo "$VERSION" | sed 's/-.*$//')
IFS='.' read -r -a version_parts <<< "$BASE_VERSION"
MAJOR="${version_parts[0]:-0}"
MINOR="${version_parts[1]:-0}"
PATCH="${version_parts[2]:-0}"
case "$VERSION_TYPE" in
major)
NEW_VERSION="$((MAJOR + 1)).0.0"
;;
minor)
NEW_VERSION="${MAJOR}.$((MINOR + 1)).0"
;;
*)
NEW_VERSION="${MAJOR}.${MINOR}.$((PATCH + 1))"
;;
esac
echo "New version: $NEW_VERSION"
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
echo "new_tag=v$NEW_VERSION" >> $GITHUB_OUTPUT
- name: Update VERSION file
run: |
echo "${{ steps.next_version.outputs.new_version }}" > VERSION
- name: Commit VERSION and create tag
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add VERSION
if ! git diff --cached --quiet; then
git commit -m "chore: bump version to ${{ steps.next_version.outputs.new_tag }} [skip ci]"
fi
NEW_TAG="${{ steps.next_version.outputs.new_tag }}"
git tag -a "$NEW_TAG" -m "Release $NEW_TAG"
git push origin HEAD:main "$NEW_TAG"
# Docker 构建并推送到阿里云
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Aliyun Container Registry
uses: docker/login-action@v3
with:
registry: ${{ secrets.ALIYUN_REGISTRY }}
username: ${{ secrets.ALIYUN_REGISTRY_USER }}
password: ${{ secrets.ALIYUN_REGISTRY_PASSWORD }}
- name: Build and push Docker image
uses: docker/build-push-action@v6
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.ALIYUN_REGISTRY }}/${{ secrets.ALIYUN_REGISTRY_NAMESPACE }}/ds2api:${{ steps.next_version.outputs.new_tag }}
${{ secrets.ALIYUN_REGISTRY }}/${{ secrets.ALIYUN_REGISTRY_NAMESPACE }}/ds2api:${{ steps.next_version.outputs.new_version }}
${{ secrets.ALIYUN_REGISTRY }}/${{ secrets.ALIYUN_REGISTRY_NAMESPACE }}/ds2api:latest
labels: |
org.opencontainers.image.version=${{ steps.next_version.outputs.new_version }}
org.opencontainers.image.revision=${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=max

40
.gitignore vendored
View File

@@ -2,37 +2,6 @@
config.json config.json
.env .env
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# Virtual environments
venv/
ENV/
env/
.venv
# IDE # IDE
.vscode/ .vscode/
.idea/ .idea/
@@ -44,7 +13,6 @@ env/
# Logs # Logs
*.log *.log
logs/ logs/
uvicorn.log
artifacts/ artifacts/
# Vercel # Vercel
@@ -56,8 +24,6 @@ webui/node_modules/
webui/dist/ webui/dist/
.npm .npm
.pnpm-store/ .pnpm-store/
# 保留 webui/package-lock.json 用于 CI 缓存
# package-lock.json # 如果有根目录的可以忽略
yarn.lock yarn.lock
pnpm-lock.yaml pnpm-lock.yaml
@@ -86,7 +52,9 @@ coverage*.out
cover/ cover/
# Misc # Misc
*.pyc
*.pyo
.git/ .git/
Thumbs.db Thumbs.db
# Claude Code
.claude/
CLAUDE.local.md

View File

@@ -8,12 +8,15 @@ RUN npm run build
FROM golang:1.24 AS go-builder FROM golang:1.24 AS go-builder
WORKDIR /app WORKDIR /app
ARG TARGETOS=linux ARG TARGETOS
ARG TARGETARCH=amd64 ARG TARGETARCH
COPY go.mod go.sum* ./ COPY go.mod go.sum* ./
RUN go mod download RUN go mod download
COPY . . COPY . .
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /out/ds2api ./cmd/ds2api RUN set -eux; \
GOOS="${TARGETOS:-$(go env GOOS)}"; \
GOARCH="${TARGETARCH:-$(go env GOARCH)}"; \
CGO_ENABLED=0 GOOS="${GOOS}" GOARCH="${GOARCH}" go build -o /out/ds2api ./cmd/ds2api
FROM busybox:1.36.1-musl AS busybox-tools FROM busybox:1.36.1-musl AS busybox-tools

View File

@@ -1,5 +1,5 @@
<p align="center"> <p align="center">
<img src="assets/ds2api-icon.svg" width="128" height="128" alt="DS2API icon" /> <img src="webui/public/ds2api-favicon.svg" width="128" height="128" alt="DS2API icon" />
</p> </p>
# DS2API # DS2API
@@ -10,6 +10,7 @@
[![Release](https://img.shields.io/github/v/release/CJackHwang/ds2api?display_name=tag)](https://github.com/CJackHwang/ds2api/releases) [![Release](https://img.shields.io/github/v/release/CJackHwang/ds2api?display_name=tag)](https://github.com/CJackHwang/ds2api/releases)
[![Docker](https://img.shields.io/badge/docker-ready-blue.svg)](DEPLOY.md) [![Docker](https://img.shields.io/badge/docker-ready-blue.svg)](DEPLOY.md)
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/L4CFHP) [![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/L4CFHP)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https://github.com/CJackHwang/ds2api)
语言 / Language: [中文](README.MD) | [English](README.en.md) 语言 / Language: [中文](README.MD) | [English](README.en.md)

View File

@@ -1,5 +1,5 @@
<p align="center"> <p align="center">
<img src="assets/ds2api-icon.svg" width="128" height="128" alt="DS2API icon" /> <img src="webui/public/ds2api-favicon.svg" width="128" height="128" alt="DS2API icon" />
</p> </p>
# DS2API # DS2API
@@ -10,6 +10,7 @@
[![Release](https://img.shields.io/github/v/release/CJackHwang/ds2api?display_name=tag)](https://github.com/CJackHwang/ds2api/releases) [![Release](https://img.shields.io/github/v/release/CJackHwang/ds2api?display_name=tag)](https://github.com/CJackHwang/ds2api/releases)
[![Docker](https://img.shields.io/badge/docker-ready-blue.svg)](DEPLOY.en.md) [![Docker](https://img.shields.io/badge/docker-ready-blue.svg)](DEPLOY.en.md)
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/L4CFHP) [![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/L4CFHP)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https://github.com/CJackHwang/ds2api)
Language: [中文](README.MD) | [English](README.en.md) Language: [中文](README.MD) | [English](README.en.md)

1
VERSION Normal file
View File

@@ -0,0 +1 @@
0.1.0

View File

@@ -1,63 +0,0 @@
<svg width="512" height="512" viewBox="0 0 512 512" fill="none" xmlns="http://www.w3.org/2000/svg" role="img" aria-label="DS2API icon">
<defs>
<linearGradient id="bg" x1="96" y1="96" x2="416" y2="416" gradientUnits="userSpaceOnUse">
<stop offset="0" stop-color="#06162D" />
<stop offset="0.6" stop-color="#0A3A6A" />
<stop offset="1" stop-color="#00B4D8" />
</linearGradient>
<radialGradient id="glow" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(256 180) rotate(90) scale(260)">
<stop offset="0" stop-color="#FFFFFF" stop-opacity="0.18" />
<stop offset="1" stop-color="#FFFFFF" stop-opacity="0" />
</radialGradient>
<linearGradient id="whale" x1="180" y1="140" x2="360" y2="360" gradientUnits="userSpaceOnUse">
<stop offset="0" stop-color="#EAF7FF" />
<stop offset="1" stop-color="#BDEBFF" />
</linearGradient>
</defs>
<circle cx="256" cy="256" r="240" fill="url(#bg)" />
<circle cx="256" cy="256" r="240" fill="url(#glow)" />
<circle cx="256" cy="256" r="240" stroke="#FFFFFF" stroke-opacity="0.14" stroke-width="8" />
<!-- subtle waves -->
<path d="M104 338 C156 308 204 366 256 334 C308 302 356 360 408 330" stroke="#FFFFFF" stroke-opacity="0.16" stroke-width="12" stroke-linecap="round" />
<path d="M124 372 C174 344 212 396 256 372 C300 348 338 396 388 368" stroke="#FFFFFF" stroke-opacity="0.12" stroke-width="10" stroke-linecap="round" />
<!-- whale tail (DeepSeek-inspired element, original design) -->
<path
d="M256 162
C228 124 184 118 156 146
C132 170 138 206 162 230
C190 262 230 252 252 220
C254 218 255 216 256 214
C257 216 258 218 260 220
C282 252 322 262 350 230
C374 206 380 170 356 146
C328 118 284 124 256 162 Z"
fill="url(#whale)"
/>
<rect x="236" y="214" width="40" height="168" rx="20" fill="url(#whale)" />
<!-- API nodes -->
<g opacity="0.55" stroke="#FFFFFF" stroke-opacity="0.35" stroke-width="6" stroke-linecap="round">
<path d="M156 236 L208 206" />
<path d="M356 236 L304 206" />
<path d="M208 206 L232 172" />
<circle cx="156" cy="236" r="10" fill="#FFFFFF" fill-opacity="0.28" />
<circle cx="208" cy="206" r="10" fill="#FFFFFF" fill-opacity="0.28" />
<circle cx="232" cy="172" r="10" fill="#FFFFFF" fill-opacity="0.28" />
<circle cx="304" cy="206" r="10" fill="#FFFFFF" fill-opacity="0.28" />
<circle cx="356" cy="236" r="10" fill="#FFFFFF" fill-opacity="0.28" />
</g>
<!-- tiny sparkle -->
<path
d="M378 164
C372 170 366 174 358 176
C366 178 372 182 378 188
C380 180 384 176 392 176
C384 174 380 170 378 164 Z"
fill="#FFFFFF"
fill-opacity="0.32"
/>
</svg>

Before

Width:  |  Height:  |  Size: 2.7 KiB

View File

@@ -1,18 +1,14 @@
services: services:
ds2api: ds2api:
build: . image: ghcr.io/cjackhwang/ds2api:latest
image: ds2api:latest container_name: ds2api
container_name: ds2api restart: always
ports: ports:
- "${PORT:-5001}:${PORT:-5001}" - "6011:5001"
env_file: volumes:
- .env - ./config.json:/app/config.json # 配置文件
environment: - ./.env:/app/.env # 环境变量
- HOST=0.0.0.0 environment:
restart: unless-stopped - TZ=Asia/Shanghai
healthcheck: - LOG_LEVEL=INFO
test: ["CMD", "/usr/local/bin/busybox", "wget", "-qO-", "http://localhost:${PORT:-5001}/healthz"] - DS2API_ADMIN_KEY=${DS2API_ADMIN_KEY:-ds2api}
interval: 30s
timeout: 10s
retries: 3
start_period: 10s

View File

@@ -98,7 +98,7 @@ func (s *chatStreamRuntime) sendDone() {
func (s *chatStreamRuntime) finalize(finishReason string) { func (s *chatStreamRuntime) finalize(finishReason string) {
finalThinking := s.thinking.String() finalThinking := s.thinking.String()
finalText := s.text.String() finalText := s.text.String()
detected := util.ParseToolCalls(finalText, s.toolNames) detected := util.ParseStandaloneToolCalls(finalText, s.toolNames)
if len(detected) > 0 && !s.toolCallsDoneEmitted { if len(detected) > 0 && !s.toolCallsDoneEmitted {
finishReason = "tool_calls" finishReason = "tool_calls"
delta := map[string]any{ delta := map[string]any{

View File

@@ -3,6 +3,7 @@ package openai
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@@ -210,7 +211,7 @@ func TestHandleNonStreamUnknownToolNotIntercepted(t *testing.T) {
} }
} }
func TestHandleNonStreamEmbeddedToolCallExampleIntercepted(t *testing.T) { func TestHandleNonStreamEmbeddedToolCallExampleRemainsText(t *testing.T) {
h := &Handler{} h := &Handler{}
resp := makeSSEHTTPResponse( resp := makeSSEHTTPResponse(
`data: {"p":"response/content","v":"下面是示例:"}`, `data: {"p":"response/content","v":"下面是示例:"}`,
@@ -228,16 +229,16 @@ func TestHandleNonStreamEmbeddedToolCallExampleIntercepted(t *testing.T) {
out := decodeJSONBody(t, rec.Body.String()) out := decodeJSONBody(t, rec.Body.String())
choices, _ := out["choices"].([]any) choices, _ := out["choices"].([]any)
choice, _ := choices[0].(map[string]any) choice, _ := choices[0].(map[string]any)
if choice["finish_reason"] != "tool_calls" { if choice["finish_reason"] != "stop" {
t.Fatalf("expected finish_reason=tool_calls, got %#v", choice["finish_reason"]) t.Fatalf("expected finish_reason=stop, got %#v", choice["finish_reason"])
} }
msg, _ := choice["message"].(map[string]any) msg, _ := choice["message"].(map[string]any)
toolCalls, _ := msg["tool_calls"].([]any) if _, ok := msg["tool_calls"]; ok {
if len(toolCalls) == 0 { t.Fatalf("did not expect tool_calls field for embedded example: %#v", msg["tool_calls"])
t.Fatalf("expected tool_calls field for embedded example: %#v", msg["tool_calls"])
} }
if msg["content"] != nil { content, _ := msg["content"].(string)
t.Fatalf("expected content nil when tool_calls detected, got %#v", msg["content"]) if !strings.Contains(content, "下面是示例:") || !strings.Contains(content, "请勿执行。") || !strings.Contains(content, `"tool_calls"`) {
t.Fatalf("expected embedded example to remain plain text, got %#v", content)
} }
} }
@@ -315,6 +316,36 @@ func TestHandleStreamToolCallInterceptsWithoutRawContentLeak(t *testing.T) {
} }
} }
func TestHandleStreamToolCallLargeArgumentsStillIntercepted(t *testing.T) {
h := &Handler{}
large := strings.Repeat("a", 9000)
payload := fmt.Sprintf(`{"tool_calls":[{"name":"search","input":{"q":"%s"}}]}`, large)
splitAt := len(payload) / 2
resp := makeSSEHTTPResponse(
fmt.Sprintf(`data: {"p":"response/content","v":%q}`, payload[:splitAt]),
fmt.Sprintf(`data: {"p":"response/content","v":%q}`, payload[splitAt:]),
`data: [DONE]`,
)
rec := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", nil)
h.handleStream(rec, req, resp, "cid3-large", "deepseek-chat", "prompt", false, false, []string{"search"})
frames, done := parseSSEDataFrames(t, rec.Body.String())
if !done {
t.Fatalf("expected [DONE], body=%s", rec.Body.String())
}
if !streamHasToolCallsDelta(frames) {
t.Fatalf("expected tool_calls delta, body=%s", rec.Body.String())
}
if streamHasRawToolJSONContent(frames) {
t.Fatalf("raw tool_calls JSON leaked in content delta: %s", rec.Body.String())
}
if streamFinishReason(frames) != "tool_calls" {
t.Fatalf("expected finish_reason=tool_calls, body=%s", rec.Body.String())
}
}
func TestHandleStreamReasonerToolCallInterceptsWithoutRawContentLeak(t *testing.T) { func TestHandleStreamReasonerToolCallInterceptsWithoutRawContentLeak(t *testing.T) {
h := &Handler{} h := &Handler{}
resp := makeSSEHTTPResponse( resp := makeSSEHTTPResponse(
@@ -482,8 +513,8 @@ func TestHandleStreamToolCallMixedWithPlainTextSegments(t *testing.T) {
if !done { if !done {
t.Fatalf("expected [DONE], body=%s", rec.Body.String()) t.Fatalf("expected [DONE], body=%s", rec.Body.String())
} }
if !streamHasToolCallsDelta(frames) { if streamHasToolCallsDelta(frames) {
t.Fatalf("expected tool_calls delta in mixed prose stream, body=%s", rec.Body.String()) t.Fatalf("did not expect tool_calls delta in mixed prose stream, body=%s", rec.Body.String())
} }
content := strings.Builder{} content := strings.Builder{}
for _, frame := range frames { for _, frame := range frames {
@@ -500,15 +531,15 @@ func TestHandleStreamToolCallMixedWithPlainTextSegments(t *testing.T) {
if !strings.Contains(got, "下面是示例:") || !strings.Contains(got, "请勿执行。") { if !strings.Contains(got, "下面是示例:") || !strings.Contains(got, "请勿执行。") {
t.Fatalf("expected pre/post plain text to pass sieve, got=%q", got) t.Fatalf("expected pre/post plain text to pass sieve, got=%q", got)
} }
if strings.Contains(strings.ToLower(got), `"tool_calls"`) { if !strings.Contains(strings.ToLower(got), `"tool_calls"`) {
t.Fatalf("expected no raw tool_calls json leak in content, got=%q", got) t.Fatalf("expected embedded tool json to remain text in strict mode, got=%q", got)
} }
if streamFinishReason(frames) != "tool_calls" { if streamFinishReason(frames) != "stop" {
t.Fatalf("expected finish_reason=tool_calls for mixed prose, body=%s", rec.Body.String()) t.Fatalf("expected finish_reason=stop for mixed prose, body=%s", rec.Body.String())
} }
} }
func TestHandleStreamToolCallAfterLeadingTextStillIntercepted(t *testing.T) { func TestHandleStreamToolCallAfterLeadingTextRemainsText(t *testing.T) {
h := &Handler{} h := &Handler{}
resp := makeSSEHTTPResponse( resp := makeSSEHTTPResponse(
`data: {"p":"response/content","v":"我将调用工具。"}`, `data: {"p":"response/content","v":"我将调用工具。"}`,
@@ -524,8 +555,8 @@ func TestHandleStreamToolCallAfterLeadingTextStillIntercepted(t *testing.T) {
if !done { if !done {
t.Fatalf("expected [DONE], body=%s", rec.Body.String()) t.Fatalf("expected [DONE], body=%s", rec.Body.String())
} }
if !streamHasToolCallsDelta(frames) { if streamHasToolCallsDelta(frames) {
t.Fatalf("expected tool_calls delta, body=%s", rec.Body.String()) t.Fatalf("did not expect tool_calls delta, body=%s", rec.Body.String())
} }
content := strings.Builder{} content := strings.Builder{}
for _, frame := range frames { for _, frame := range frames {
@@ -542,15 +573,15 @@ func TestHandleStreamToolCallAfterLeadingTextStillIntercepted(t *testing.T) {
if !strings.Contains(got, "我将调用工具。") { if !strings.Contains(got, "我将调用工具。") {
t.Fatalf("expected leading text to keep streaming, got=%q", got) t.Fatalf("expected leading text to keep streaming, got=%q", got)
} }
if strings.Contains(strings.ToLower(got), "tool_calls") { if !strings.Contains(strings.ToLower(got), "tool_calls") {
t.Fatalf("unexpected raw tool json leak, got=%q", got) t.Fatalf("expected tool_calls example text preserved, got=%q", got)
} }
if streamFinishReason(frames) != "tool_calls" { if streamFinishReason(frames) != "stop" {
t.Fatalf("expected finish_reason=tool_calls, body=%s", rec.Body.String()) t.Fatalf("expected finish_reason=stop, body=%s", rec.Body.String())
} }
} }
func TestHandleStreamToolCallWithSameChunkTrailingTextStillIntercepted(t *testing.T) { func TestHandleStreamToolCallWithSameChunkTrailingTextRemainsText(t *testing.T) {
h := &Handler{} h := &Handler{}
resp := makeSSEHTTPResponse( resp := makeSSEHTTPResponse(
`data: {"p":"response/content","v":"{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}接下来我会继续说明。"}`, `data: {"p":"response/content","v":"{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go\"}}]}接下来我会继续说明。"}`,
@@ -565,8 +596,8 @@ func TestHandleStreamToolCallWithSameChunkTrailingTextStillIntercepted(t *testin
if !done { if !done {
t.Fatalf("expected [DONE], body=%s", rec.Body.String()) t.Fatalf("expected [DONE], body=%s", rec.Body.String())
} }
if !streamHasToolCallsDelta(frames) { if streamHasToolCallsDelta(frames) {
t.Fatalf("expected tool_calls delta, body=%s", rec.Body.String()) t.Fatalf("did not expect tool_calls delta, body=%s", rec.Body.String())
} }
content := strings.Builder{} content := strings.Builder{}
for _, frame := range frames { for _, frame := range frames {
@@ -583,15 +614,15 @@ func TestHandleStreamToolCallWithSameChunkTrailingTextStillIntercepted(t *testin
if !strings.Contains(got, "接下来我会继续说明。") { if !strings.Contains(got, "接下来我会继续说明。") {
t.Fatalf("expected trailing plain text to be preserved, got=%q", got) t.Fatalf("expected trailing plain text to be preserved, got=%q", got)
} }
if strings.Contains(strings.ToLower(got), "tool_calls") { if !strings.Contains(strings.ToLower(got), "tool_calls") {
t.Fatalf("unexpected raw tool json leak, got=%q", got) t.Fatalf("expected tool_calls example text preserved, got=%q", got)
} }
if streamFinishReason(frames) != "tool_calls" { if streamFinishReason(frames) != "stop" {
t.Fatalf("expected finish_reason=tool_calls, body=%s", rec.Body.String()) t.Fatalf("expected finish_reason=stop, body=%s", rec.Body.String())
} }
} }
func TestHandleStreamToolCallKeyAppearsLateStillNoPrefixLeak(t *testing.T) { func TestHandleStreamToolCallKeyAppearsLateRemainsText(t *testing.T) {
h := &Handler{} h := &Handler{}
spaces := strings.Repeat(" ", 200) spaces := strings.Repeat(" ", 200)
resp := makeSSEHTTPResponse( resp := makeSSEHTTPResponse(
@@ -609,11 +640,8 @@ func TestHandleStreamToolCallKeyAppearsLateStillNoPrefixLeak(t *testing.T) {
if !done { if !done {
t.Fatalf("expected [DONE], body=%s", rec.Body.String()) t.Fatalf("expected [DONE], body=%s", rec.Body.String())
} }
if !streamHasToolCallsDelta(frames) { if streamHasToolCallsDelta(frames) {
t.Fatalf("expected tool_calls delta, body=%s", rec.Body.String()) t.Fatalf("did not expect tool_calls delta, body=%s", rec.Body.String())
}
if streamHasRawToolJSONContent(frames) {
t.Fatalf("raw tool_calls JSON leaked in content delta: %s", rec.Body.String())
} }
content := strings.Builder{} content := strings.Builder{}
for _, frame := range frames { for _, frame := range frames {
@@ -627,14 +655,14 @@ func TestHandleStreamToolCallKeyAppearsLateStillNoPrefixLeak(t *testing.T) {
} }
} }
got := content.String() got := content.String()
if strings.Contains(got, "{") { if !strings.Contains(strings.ToLower(got), "tool_calls") || !strings.Contains(got, "{") {
t.Fatalf("unexpected suspicious prefix leak in content: %q", got) t.Fatalf("expected embedded tool json to remain in text, got=%q", got)
} }
if !strings.Contains(got, "后置正文C。") { if !strings.Contains(got, "后置正文C。") {
t.Fatalf("expected stream to continue after tool json convergence, got=%q", got) t.Fatalf("expected stream to continue after tool json convergence, got=%q", got)
} }
if streamFinishReason(frames) != "tool_calls" { if streamFinishReason(frames) != "stop" {
t.Fatalf("expected finish_reason=tool_calls, body=%s", rec.Body.String()) t.Fatalf("expected finish_reason=stop, body=%s", rec.Body.String())
} }
} }
@@ -712,7 +740,7 @@ func TestHandleStreamIncompleteCapturedToolJSONFlushesAsTextOnFinalize(t *testin
} }
} }
func TestHandleStreamToolCallArgumentsEmitIncrementally(t *testing.T) { func TestHandleStreamToolCallArgumentsEmitAsSingleCompletedChunk(t *testing.T) {
h := &Handler{} h := &Handler{}
resp := makeSSEHTTPResponse( resp := makeSSEHTTPResponse(
`data: {"p":"response/content","v":"{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go"}`, `data: {"p":"response/content","v":"{\"tool_calls\":[{\"name\":\"search\",\"input\":{\"q\":\"go"}`,
@@ -735,8 +763,8 @@ func TestHandleStreamToolCallArgumentsEmitIncrementally(t *testing.T) {
t.Fatalf("raw tool_calls JSON leaked in content delta: %s", rec.Body.String()) t.Fatalf("raw tool_calls JSON leaked in content delta: %s", rec.Body.String())
} }
argChunks := streamToolCallArgumentChunks(frames) argChunks := streamToolCallArgumentChunks(frames)
if len(argChunks) < 2 { if len(argChunks) == 0 {
t.Fatalf("expected incremental arguments chunks, got=%v body=%s", argChunks, rec.Body.String()) t.Fatalf("expected tool call arguments chunk, got=%v body=%s", argChunks, rec.Body.String())
} }
joined := strings.Join(argChunks, "") joined := strings.Join(argChunks, "")
if !strings.Contains(joined, `"q":"golang"`) || !strings.Contains(joined, `"page":1`) { if !strings.Contains(joined, `"q":"golang"`) || !strings.Contains(joined, `"page":1`) {

View File

@@ -3,7 +3,6 @@ package openai
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"strings" "strings"
"ds2api/internal/config" "ds2api/internal/config"
@@ -175,30 +174,11 @@ func normalizeToolArgumentString(raw string) string {
if trimmed == "" { if trimmed == "" {
return "" return ""
} }
if !looksLikeConcatenatedJSON(trimmed) { if looksLikeConcatenatedJSON(trimmed) {
return trimmed // Keep original payload to avoid silent argument rewrites.
return raw
} }
dec := json.NewDecoder(strings.NewReader(trimmed)) return trimmed
values := make([]any, 0, 2)
for {
var v any
if err := dec.Decode(&v); err != nil {
if err == io.EOF {
break
}
return trimmed
}
values = append(values, v)
}
if len(values) < 2 {
return trimmed
}
last := values[len(values)-1]
b, err := json.Marshal(last)
if err != nil || len(b) == 0 {
return trimmed
}
return string(b)
} }
func marshalToPromptString(v any) string { func marshalToPromptString(v any) string {

View File

@@ -168,7 +168,7 @@ func TestNormalizeOpenAIMessagesForPrompt_AssistantMultipleToolCallsRemainSepara
} }
} }
func TestNormalizeOpenAIMessagesForPrompt_RepairsConcatenatedToolArguments(t *testing.T) { func TestNormalizeOpenAIMessagesForPrompt_PreservesConcatenatedToolArguments(t *testing.T) {
raw := []any{ raw := []any{
map[string]any{ map[string]any{
"role": "assistant", "role": "assistant",
@@ -189,10 +189,7 @@ func TestNormalizeOpenAIMessagesForPrompt_RepairsConcatenatedToolArguments(t *te
t.Fatalf("expected one normalized message, got %d", len(normalized)) t.Fatalf("expected one normalized message, got %d", len(normalized))
} }
content, _ := normalized[0]["content"].(string) content, _ := normalized[0]["content"].(string)
if !strings.Contains(content, `function.arguments: {"query":"测试工具调用"}`) { if !strings.Contains(content, `function.arguments: {}{"query":"测试工具调用"}`) {
t.Fatalf("expected repaired arguments in tool history, got %q", content) t.Fatalf("expected original concatenated arguments in tool history, got %q", content)
}
if strings.Contains(content, `{}{"query":"测试工具调用"}`) {
t.Fatalf("expected concatenated JSON to be repaired, got %q", content)
} }
} }

View File

@@ -135,7 +135,7 @@ func TestNormalizeResponsesInputAsMessagesFunctionCallItem(t *testing.T) {
} }
} }
func TestNormalizeResponsesInputAsMessagesFunctionCallItemRepairsConcatenatedArguments(t *testing.T) { func TestNormalizeResponsesInputAsMessagesFunctionCallItemPreservesConcatenatedArguments(t *testing.T) {
msgs := normalizeResponsesInputAsMessages([]any{ msgs := normalizeResponsesInputAsMessages([]any{
map[string]any{ map[string]any{
"type": "function_call", "type": "function_call",
@@ -151,8 +151,8 @@ func TestNormalizeResponsesInputAsMessagesFunctionCallItemRepairsConcatenatedArg
toolCalls, _ := m["tool_calls"].([]any) toolCalls, _ := m["tool_calls"].([]any)
call, _ := toolCalls[0].(map[string]any) call, _ := toolCalls[0].(map[string]any)
fn, _ := call["function"].(map[string]any) fn, _ := call["function"].(map[string]any)
if fn["arguments"] != `{"q":"golang"}` { if fn["arguments"] != `{}{"q":"golang"}` {
t.Fatalf("expected concatenated call arguments repaired, got %#v", fn["arguments"]) t.Fatalf("expected original concatenated call arguments preserved, got %#v", fn["arguments"])
} }
} }

View File

@@ -113,15 +113,10 @@ func (h *Handler) handleResponsesNonStream(w http.ResponseWriter, resp *http.Res
return return
} }
result := sse.CollectStream(resp, thinkingEnabled, true) result := sse.CollectStream(resp, thinkingEnabled, true)
textParsed := util.ParseToolCallsDetailed(result.Text, toolNames) textParsed := util.ParseStandaloneToolCallsDetailed(result.Text, toolNames)
thinkingParsed := util.ParseToolCallsDetailed(result.Thinking, toolNames)
logResponsesToolPolicyRejection(traceID, toolChoice, textParsed, "text") logResponsesToolPolicyRejection(traceID, toolChoice, textParsed, "text")
logResponsesToolPolicyRejection(traceID, toolChoice, thinkingParsed, "thinking")
callCount := len(textParsed.Calls) callCount := len(textParsed.Calls)
if callCount == 0 {
callCount = len(thinkingParsed.Calls)
}
if toolChoice.IsRequired() && callCount == 0 { if toolChoice.IsRequired() && callCount == 0 {
writeOpenAIErrorWithCode(w, http.StatusUnprocessableEntity, "tool_choice requires at least one valid tool call.", "tool_choice_violation") writeOpenAIErrorWithCode(w, http.StatusUnprocessableEntity, "tool_choice requires at least one valid tool call.", "tool_choice_violation")
return return

View File

@@ -102,16 +102,11 @@ func (s *responsesStreamRuntime) finalize() {
if s.bufferToolContent { if s.bufferToolContent {
s.processToolStreamEvents(flushToolSieve(&s.sieve, s.toolNames), true) s.processToolStreamEvents(flushToolSieve(&s.sieve, s.toolNames), true)
s.processToolStreamEvents(flushToolSieve(&s.thinkingSieve, s.toolNames), false)
} }
textParsed := util.ParseToolCallsDetailed(finalText, s.toolNames) textParsed := util.ParseStandaloneToolCallsDetailed(finalText, s.toolNames)
thinkingParsed := util.ParseToolCallsDetailed(finalThinking, s.toolNames)
detected := textParsed.Calls detected := textParsed.Calls
if len(detected) == 0 { s.logToolPolicyRejections(textParsed)
detected = thinkingParsed.Calls
}
s.logToolPolicyRejections(textParsed, thinkingParsed)
if len(detected) > 0 { if len(detected) > 0 {
s.toolCallsEmitted = true s.toolCallsEmitted = true
@@ -157,7 +152,7 @@ func (s *responsesStreamRuntime) finalize() {
s.sendDone() s.sendDone()
} }
func (s *responsesStreamRuntime) logToolPolicyRejections(textParsed, thinkingParsed util.ToolCallParseResult) { func (s *responsesStreamRuntime) logToolPolicyRejections(textParsed util.ToolCallParseResult) {
logRejected := func(parsed util.ToolCallParseResult, channel string) { logRejected := func(parsed util.ToolCallParseResult, channel string) {
rejected := filteredRejectedToolNamesForLog(parsed.RejectedToolNames) rejected := filteredRejectedToolNamesForLog(parsed.RejectedToolNames)
if !parsed.RejectedByPolicy || len(rejected) == 0 { if !parsed.RejectedByPolicy || len(rejected) == 0 {
@@ -172,7 +167,6 @@ func (s *responsesStreamRuntime) logToolPolicyRejections(textParsed, thinkingPar
) )
} }
logRejected(textParsed, "text") logRejected(textParsed, "text")
logRejected(thinkingParsed, "thinking")
} }
func (s *responsesStreamRuntime) hasFunctionCallDone() bool { func (s *responsesStreamRuntime) hasFunctionCallDone() bool {
@@ -207,9 +201,6 @@ func (s *responsesStreamRuntime) onParsed(parsed sse.LineResult) streamengine.Pa
} }
s.thinking.WriteString(p.Text) s.thinking.WriteString(p.Text)
s.sendEvent("response.reasoning.delta", openaifmt.BuildResponsesReasoningDeltaPayload(s.responseID, p.Text)) s.sendEvent("response.reasoning.delta", openaifmt.BuildResponsesReasoningDeltaPayload(s.responseID, p.Text))
if s.bufferToolContent {
s.processToolStreamEvents(processToolSieveChunk(&s.thinkingSieve, p.Text, s.toolNames), false)
}
continue continue
} }

View File

@@ -99,9 +99,6 @@ func TestHandleResponsesStreamUsesOfficialOutputItemEvents(t *testing.T) {
if !strings.Contains(body, "event: response.output_item.done") { if !strings.Contains(body, "event: response.output_item.done") {
t.Fatalf("expected response.output_item.done event, body=%s", body) t.Fatalf("expected response.output_item.done event, body=%s", body)
} }
if !strings.Contains(body, "event: response.function_call_arguments.delta") {
t.Fatalf("expected response.function_call_arguments.delta event, body=%s", body)
}
if !strings.Contains(body, "event: response.function_call_arguments.done") { if !strings.Contains(body, "event: response.function_call_arguments.done") {
t.Fatalf("expected response.function_call_arguments.done event, body=%s", body) t.Fatalf("expected response.function_call_arguments.done event, body=%s", body)
} }
@@ -266,7 +263,7 @@ func TestHandleResponsesStreamOutputTextDeltaCarriesItemIndexes(t *testing.T) {
} }
} }
func TestHandleResponsesStreamThinkingTextAndToolUseDistinctOutputIndexes(t *testing.T) { func TestHandleResponsesStreamThinkingAndMixedToolExampleRemainMessageOnly(t *testing.T) {
h := &Handler{} h := &Handler{}
req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil) req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
@@ -291,23 +288,12 @@ func TestHandleResponsesStreamThinkingTextAndToolUseDistinctOutputIndexes(t *tes
h.handleResponsesStream(rec, req, resp, "owner-a", "resp_test", "deepseek-reasoner", "prompt", true, false, []string{"read_file"}, util.DefaultToolChoicePolicy(), "") h.handleResponsesStream(rec, req, resp, "owner-a", "resp_test", "deepseek-reasoner", "prompt", true, false, []string{"read_file"}, util.DefaultToolChoicePolicy(), "")
addedPayloads := extractAllSSEEventPayloads(rec.Body.String(), "response.output_item.added") addedPayloads := extractAllSSEEventPayloads(rec.Body.String(), "response.output_item.added")
if len(addedPayloads) < 2 { if len(addedPayloads) != 1 {
t.Fatalf("expected message + function_call output_item.added events, got %d body=%s", len(addedPayloads), rec.Body.String()) t.Fatalf("expected only one message output_item.added event, got %d body=%s", len(addedPayloads), rec.Body.String())
} }
item, _ := addedPayloads[0]["item"].(map[string]any)
indexes := map[int]struct{}{} if asString(item["type"]) != "message" {
typeByIndex := map[int]string{} t.Fatalf("expected only message output item in strict mode, got %#v", item)
addedIDs := map[string]string{}
for _, payload := range addedPayloads {
item, _ := payload["item"].(map[string]any)
itemType := strings.TrimSpace(asString(item["type"]))
outputIndex := int(asFloat(payload["output_index"]))
if _, exists := indexes[outputIndex]; exists {
t.Fatalf("found duplicated output_index=%d for item types=%q and %q payload=%#v", outputIndex, typeByIndex[outputIndex], itemType, payload)
}
indexes[outputIndex] = struct{}{}
typeByIndex[outputIndex] = itemType
addedIDs[itemType] = strings.TrimSpace(asString(payload["item_id"]))
} }
completedPayload, ok := extractSSEEventPayload(rec.Body.String(), "response.completed") completedPayload, ok := extractSSEEventPayload(rec.Body.String(), "response.completed")
@@ -316,21 +302,15 @@ func TestHandleResponsesStreamThinkingTextAndToolUseDistinctOutputIndexes(t *tes
} }
responseObj, _ := completedPayload["response"].(map[string]any) responseObj, _ := completedPayload["response"].(map[string]any)
output, _ := responseObj["output"].([]any) output, _ := responseObj["output"].([]any)
found := map[string]bool{}
for _, item := range output { for _, item := range output {
m, _ := item.(map[string]any) m, _ := item.(map[string]any)
itemType := strings.TrimSpace(asString(m["type"])) if m == nil {
itemID := strings.TrimSpace(asString(m["id"]))
if itemType == "" || itemID == "" {
continue continue
} }
if wantID := strings.TrimSpace(addedIDs[itemType]); wantID != "" && wantID == itemID { if asString(m["type"]) == "function_call" {
found[itemType] = true t.Fatalf("did not expect function_call output for mixed prose tool example, output=%#v", output)
} }
} }
if !found["message"] || !found["function_call"] {
t.Fatalf("expected completed output to contain streamed message/function_call item ids, found=%#v output=%#v", found, output)
}
} }
func TestHandleResponsesStreamToolChoiceNoneRejectsFunctionCall(t *testing.T) { func TestHandleResponsesStreamToolChoiceNoneRejectsFunctionCall(t *testing.T) {
@@ -360,7 +340,7 @@ func TestHandleResponsesStreamToolChoiceNoneRejectsFunctionCall(t *testing.T) {
} }
} }
func TestHandleResponsesStreamMalformedToolJSONClosesInProgressFunctionItem(t *testing.T) { func TestHandleResponsesStreamMalformedToolJSONFallsBackToText(t *testing.T) {
h := &Handler{} h := &Handler{}
req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil) req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
@@ -373,7 +353,7 @@ func TestHandleResponsesStreamMalformedToolJSONClosesInProgressFunctionItem(t *t
return "data: " + string(b) + "\n" return "data: " + string(b) + "\n"
} }
// invalid JSON (NaN) can still trigger incremental tool deltas before final parse rejects it // invalid JSON (NaN) should remain plain text in strict mode.
streamBody := sseLine(`{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"},"x":NaN}]}`) + "data: [DONE]\n" streamBody := sseLine(`{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"},"x":NaN}]}`) + "data: [DONE]\n"
resp := &http.Response{ resp := &http.Response{
StatusCode: http.StatusOK, StatusCode: http.StatusOK,
@@ -382,14 +362,11 @@ func TestHandleResponsesStreamMalformedToolJSONClosesInProgressFunctionItem(t *t
h.handleResponsesStream(rec, req, resp, "owner-a", "resp_test", "deepseek-chat", "prompt", false, false, []string{"read_file"}, util.DefaultToolChoicePolicy(), "") h.handleResponsesStream(rec, req, resp, "owner-a", "resp_test", "deepseek-chat", "prompt", false, false, []string{"read_file"}, util.DefaultToolChoicePolicy(), "")
body := rec.Body.String() body := rec.Body.String()
if !strings.Contains(body, "event: response.function_call_arguments.delta") { if strings.Contains(body, "event: response.function_call_arguments.delta") || strings.Contains(body, "event: response.function_call_arguments.done") {
t.Fatalf("expected response.function_call_arguments.delta event for malformed payload, body=%s", body) t.Fatalf("did not expect function_call events for malformed payload in strict mode, body=%s", body)
} }
if !strings.Contains(body, "event: response.function_call_arguments.done") { if !strings.Contains(body, "event: response.output_text.delta") {
t.Fatalf("expected runtime to close in-progress function_call with done event, body=%s", body) t.Fatalf("expected response.output_text.delta for malformed payload, body=%s", body)
}
if !strings.Contains(body, "event: response.output_item.done") {
t.Fatalf("expected runtime to close function output item, body=%s", body)
} }
if !strings.Contains(body, "event: response.completed") { if !strings.Contains(body, "event: response.completed") {
t.Fatalf("expected response.completed event, body=%s", body) t.Fatalf("expected response.completed event, body=%s", body)
@@ -430,6 +407,42 @@ func TestHandleResponsesStreamRequiredToolChoiceFailure(t *testing.T) {
} }
} }
func TestHandleResponsesStreamRequiredToolChoiceIgnoresThinkingToolPayload(t *testing.T) {
h := &Handler{}
req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
rec := httptest.NewRecorder()
sseLine := func(path, value string) string {
b, _ := json.Marshal(map[string]any{
"p": path,
"v": value,
})
return "data: " + string(b) + "\n"
}
streamBody := sseLine("response/thinking_content", `{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}`) +
sseLine("response/content", "plain text only") +
"data: [DONE]\n"
resp := &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(strings.NewReader(streamBody)),
}
policy := util.ToolChoicePolicy{
Mode: util.ToolChoiceRequired,
Allowed: map[string]struct{}{"read_file": {}},
}
h.handleResponsesStream(rec, req, resp, "owner-a", "resp_test", "deepseek-chat", "prompt", true, false, []string{"read_file"}, policy, "")
body := rec.Body.String()
if !strings.Contains(body, "event: response.failed") {
t.Fatalf("expected response.failed event for required tool_choice violation, body=%s", body)
}
if strings.Contains(body, "event: response.completed") {
t.Fatalf("did not expect response.completed after failure, body=%s", body)
}
}
func TestHandleResponsesStreamRequiredMalformedToolPayloadFails(t *testing.T) { func TestHandleResponsesStreamRequiredMalformedToolPayloadFails(t *testing.T) {
h := &Handler{} h := &Handler{}
req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil) req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil)
@@ -516,6 +529,33 @@ func TestHandleResponsesNonStreamRequiredToolChoiceViolation(t *testing.T) {
} }
} }
func TestHandleResponsesNonStreamRequiredToolChoiceIgnoresThinkingToolPayload(t *testing.T) {
h := &Handler{}
rec := httptest.NewRecorder()
resp := &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(strings.NewReader(
`data: {"p":"response/thinking_content","v":"{\"tool_calls\":[{\"name\":\"read_file\",\"input\":{\"path\":\"README.MD\"}}]}"}` + "\n" +
`data: {"p":"response/content","v":"plain text only"}` + "\n" +
`data: [DONE]` + "\n",
)),
}
policy := util.ToolChoicePolicy{
Mode: util.ToolChoiceRequired,
Allowed: map[string]struct{}{"read_file": {}},
}
h.handleResponsesNonStream(rec, resp, "owner-a", "resp_test", "deepseek-chat", "prompt", true, []string{"read_file"}, policy, "")
if rec.Code != http.StatusUnprocessableEntity {
t.Fatalf("expected 422 for required tool_choice violation, got %d body=%s", rec.Code, rec.Body.String())
}
out := decodeJSONBody(t, rec.Body.String())
errObj, _ := out["error"].(map[string]any)
if asString(errObj["code"]) != "tool_choice_violation" {
t.Fatalf("expected code=tool_choice_violation, got %#v", out)
}
}
func TestHandleResponsesNonStreamToolChoiceNoneRejectsFunctionCall(t *testing.T) { func TestHandleResponsesNonStreamToolChoiceNoneRejectsFunctionCall(t *testing.T) {
h := &Handler{} h := &Handler{}
rec := httptest.NewRecorder() rec := httptest.NewRecorder()

View File

@@ -167,19 +167,15 @@ func TestResponsesNonStreamMixedProseToolPayloadHandlerPath(t *testing.T) {
t.Fatalf("decode response failed: %v body=%s", err, rec.Body.String()) t.Fatalf("decode response failed: %v body=%s", err, rec.Body.String())
} }
outputText, _ := out["output_text"].(string) outputText, _ := out["output_text"].(string)
if outputText != "" { if outputText == "" {
t.Fatalf("expected output_text hidden for tool call payload, got %q", outputText) t.Fatalf("expected output_text preserved for mixed prose payload")
} }
output, _ := out["output"].([]any) output, _ := out["output"].([]any)
hasFunctionCall := false if len(output) != 1 {
for _, item := range output { t.Fatalf("expected one output item, got %#v", output)
m, _ := item.(map[string]any)
if m != nil && m["type"] == "function_call" {
hasFunctionCall = true
break
}
} }
if !hasFunctionCall { first, _ := output[0].(map[string]any)
t.Fatalf("expected function_call output item, got %#v", output) if first["type"] != "message" {
t.Fatalf("expected message output item, got %#v", output)
} }
} }

View File

@@ -14,6 +14,21 @@ func processToolSieveChunk(state *toolStreamSieveState, chunk string, toolNames
state.pending.WriteString(chunk) state.pending.WriteString(chunk)
} }
events := make([]toolStreamEvent, 0, 2) events := make([]toolStreamEvent, 0, 2)
if len(state.pendingToolCalls) > 0 {
pending := state.pending.String()
if strings.TrimSpace(pending) != "" {
content := state.pendingToolRaw + pending
state.pending.Reset()
state.pendingToolRaw = ""
state.pendingToolCalls = nil
state.noteText(content)
events = append(events, toolStreamEvent{Content: content})
} else {
// Wait for either more non-whitespace content (demote to plain text)
// or stream flush (promote to executable tool calls).
return events
}
}
for { for {
if state.capturing { if state.capturing {
@@ -21,32 +36,23 @@ func processToolSieveChunk(state *toolStreamSieveState, chunk string, toolNames
state.capture.WriteString(state.pending.String()) state.capture.WriteString(state.pending.String())
state.pending.Reset() state.pending.Reset()
} }
if deltas := buildIncrementalToolDeltas(state); len(deltas) > 0 {
events = append(events, toolStreamEvent{ToolCallDeltas: deltas})
}
prefix, calls, suffix, ready := consumeToolCapture(state, toolNames) prefix, calls, suffix, ready := consumeToolCapture(state, toolNames)
if !ready { if !ready {
if state.capture.Len() > toolSieveCaptureLimit {
content := state.capture.String()
state.capture.Reset()
state.capturing = false
state.resetIncrementalToolState()
state.noteText(content)
events = append(events, toolStreamEvent{Content: content})
continue
}
break break
} }
captured := state.capture.String()
state.capture.Reset() state.capture.Reset()
state.capturing = false state.capturing = false
state.resetIncrementalToolState() state.resetIncrementalToolState()
if len(calls) > 0 {
state.pendingToolRaw = captured
state.pendingToolCalls = calls
continue
}
if prefix != "" { if prefix != "" {
state.noteText(prefix) state.noteText(prefix)
events = append(events, toolStreamEvent{Content: prefix}) events = append(events, toolStreamEvent{Content: prefix})
} }
if len(calls) > 0 {
events = append(events, toolStreamEvent{ToolCalls: calls})
}
if suffix != "" { if suffix != "" {
state.pending.WriteString(suffix) state.pending.WriteString(suffix)
} }
@@ -89,6 +95,11 @@ func flushToolSieve(state *toolStreamSieveState, toolNames []string) []toolStrea
return nil return nil
} }
events := processToolSieveChunk(state, "", toolNames) events := processToolSieveChunk(state, "", toolNames)
if len(state.pendingToolCalls) > 0 {
events = append(events, toolStreamEvent{ToolCalls: state.pendingToolCalls})
state.pendingToolRaw = ""
state.pendingToolCalls = nil
}
if state.capturing { if state.capturing {
consumedPrefix, consumedCalls, consumedSuffix, ready := consumeToolCapture(state, toolNames) consumedPrefix, consumedCalls, consumedSuffix, ready := consumeToolCapture(state, toolNames)
if ready { if ready {
@@ -200,6 +211,11 @@ func consumeToolCapture(state *toolStreamSieveState, toolNames []string) (prefix
if insideCodeFence(state.recentTextTail + prefixPart) { if insideCodeFence(state.recentTextTail + prefixPart) {
return captured, nil, "", true return captured, nil, "", true
} }
// Strict mode: only standalone tool payloads are executable. If the
// payload is wrapped by non-whitespace prose, keep it as plain text.
if strings.TrimSpace(state.recentTextTail) != "" || strings.TrimSpace(prefixPart) != "" || strings.TrimSpace(suffixPart) != "" {
return captured, nil, "", true
}
parsed := util.ParseStandaloneToolCallsDetailed(obj, toolNames) parsed := util.ParseStandaloneToolCallsDetailed(obj, toolNames)
if len(parsed.Calls) == 0 { if len(parsed.Calls) == 0 {
if parsed.SawToolCallSyntax && parsed.RejectedByPolicy { if parsed.SawToolCallSyntax && parsed.RejectedByPolicy {

View File

@@ -7,17 +7,19 @@ import (
) )
type toolStreamSieveState struct { type toolStreamSieveState struct {
pending strings.Builder pending strings.Builder
capture strings.Builder capture strings.Builder
capturing bool capturing bool
recentTextTail string recentTextTail string
disableDeltas bool pendingToolRaw string
toolNameSent bool pendingToolCalls []util.ParsedToolCall
toolName string disableDeltas bool
toolArgsStart int toolNameSent bool
toolArgsSent int toolName string
toolArgsString bool toolArgsStart int
toolArgsDone bool toolArgsSent int
toolArgsString bool
toolArgsDone bool
} }
type toolStreamEvent struct { type toolStreamEvent struct {
@@ -32,7 +34,6 @@ type toolCallDelta struct {
Arguments string Arguments string
} }
const toolSieveCaptureLimit = 8 * 1024
const toolSieveContextTailLimit = 256 const toolSieveContextTailLimit = 256
func (s *toolStreamSieveState) resetIncrementalToolState() { func (s *toolStreamSieveState) resetIncrementalToolState() {

View File

@@ -4,6 +4,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"net/url"
"strings" "strings"
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
@@ -24,8 +25,21 @@ func (h *Handler) listAccounts(w http.ResponseWriter, r *http.Request) {
pageSize = 100 pageSize = 100
} }
accounts := h.Store.Snapshot().Accounts accounts := h.Store.Snapshot().Accounts
total := len(accounts)
reverseAccounts(accounts) reverseAccounts(accounts)
q := strings.TrimSpace(strings.ToLower(r.URL.Query().Get("q")))
if q != "" {
filtered := make([]config.Account, 0, len(accounts))
for _, acc := range accounts {
id := strings.ToLower(acc.Identifier())
if strings.Contains(id, q) ||
strings.Contains(strings.ToLower(acc.Email), q) ||
strings.Contains(strings.ToLower(acc.Mobile), q) {
filtered = append(filtered, acc)
}
}
accounts = filtered
}
total := len(accounts)
totalPages := 1 totalPages := 1
if total > 0 { if total > 0 {
totalPages = (total + pageSize - 1) / pageSize totalPages = (total + pageSize - 1) / pageSize
@@ -71,11 +85,12 @@ func (h *Handler) addAccount(w http.ResponseWriter, r *http.Request) {
return return
} }
err := h.Store.Update(func(c *config.Config) error { err := h.Store.Update(func(c *config.Config) error {
mobileKey := config.CanonicalMobileKey(acc.Mobile)
for _, a := range c.Accounts { for _, a := range c.Accounts {
if acc.Email != "" && a.Email == acc.Email { if acc.Email != "" && a.Email == acc.Email {
return fmt.Errorf("邮箱已存在") return fmt.Errorf("邮箱已存在")
} }
if acc.Mobile != "" && a.Mobile == acc.Mobile { if mobileKey != "" && config.CanonicalMobileKey(a.Mobile) == mobileKey {
return fmt.Errorf("手机号已存在") return fmt.Errorf("手机号已存在")
} }
} }
@@ -92,6 +107,9 @@ func (h *Handler) addAccount(w http.ResponseWriter, r *http.Request) {
func (h *Handler) deleteAccount(w http.ResponseWriter, r *http.Request) { func (h *Handler) deleteAccount(w http.ResponseWriter, r *http.Request) {
identifier := chi.URLParam(r, "identifier") identifier := chi.URLParam(r, "identifier")
if decoded, err := url.PathUnescape(identifier); err == nil {
identifier = decoded
}
err := h.Store.Update(func(c *config.Config) error { err := h.Store.Update(func(c *config.Config) error {
idx := -1 idx := -1
for i, a := range c.Accounts { for i, a := range c.Accounts {

View File

@@ -1,6 +1,7 @@
package admin package admin
import ( import (
"bytes"
"encoding/json" "encoding/json"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@@ -102,6 +103,45 @@ func TestDeleteAccountSupportsMobileAlias(t *testing.T) {
} }
} }
func TestDeleteAccountSupportsEncodedPlusMobile(t *testing.T) {
h := newAdminTestHandler(t, `{
"accounts":[{"mobile":"+8613800138000","password":"pwd"}]
}`)
r := chi.NewRouter()
r.Delete("/admin/accounts/{identifier}", h.deleteAccount)
req := httptest.NewRequest(http.MethodDelete, "/admin/accounts/"+url.PathEscape("+8613800138000"), nil)
rec := httptest.NewRecorder()
r.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Fatalf("unexpected status: %d body=%s", rec.Code, rec.Body.String())
}
if got := len(h.Store.Accounts()); got != 0 {
t.Fatalf("expected account removed, remaining=%d", got)
}
}
func TestAddAccountRejectsCanonicalMobileDuplicate(t *testing.T) {
h := newAdminTestHandler(t, `{
"accounts":[{"mobile":"+8613800138000","password":"pwd"}]
}`)
r := chi.NewRouter()
r.Post("/admin/accounts", h.addAccount)
body := []byte(`{"mobile":"13800138000","password":"pwd2"}`)
req := httptest.NewRequest(http.MethodPost, "/admin/accounts", bytes.NewReader(body))
rec := httptest.NewRecorder()
r.ServeHTTP(rec, req)
if rec.Code != http.StatusBadRequest {
t.Fatalf("unexpected status: %d body=%s", rec.Code, rec.Body.String())
}
if got := len(h.Store.Accounts()); got != 1 {
t.Fatalf("expected no duplicate insert, got=%d", got)
}
}
func TestFindAccountByIdentifierSupportsMobileAndTokenOnly(t *testing.T) { func TestFindAccountByIdentifierSupportsMobileAndTokenOnly(t *testing.T) {
h := newAdminTestHandler(t, `{ h := newAdminTestHandler(t, `{
"accounts":[ "accounts":[
@@ -117,6 +157,13 @@ func TestFindAccountByIdentifierSupportsMobileAndTokenOnly(t *testing.T) {
if accByMobile.Email != "u@example.com" { if accByMobile.Email != "u@example.com" {
t.Fatalf("unexpected account by mobile: %#v", accByMobile) t.Fatalf("unexpected account by mobile: %#v", accByMobile)
} }
accByMobileWithCountryCode, ok := findAccountByIdentifier(h.Store, "+8613800138000")
if !ok {
t.Fatal("expected find by +86 mobile")
}
if accByMobileWithCountryCode.Email != "u@example.com" {
t.Fatalf("unexpected account by +86 mobile: %#v", accByMobileWithCountryCode)
}
tokenOnlyID := "" tokenOnlyID := ""
for _, acc := range h.Store.Accounts() { for _, acc := range h.Store.Accounts() {

View File

@@ -0,0 +1,76 @@
package admin
import (
"context"
"errors"
"net/http"
"strings"
"testing"
"ds2api/internal/auth"
"ds2api/internal/config"
)
type testingDSMock struct {
loginCalls int
createSessionCalls int
getPowCalls int
callCompletionCalls int
}
func (m *testingDSMock) Login(_ context.Context, _ config.Account) (string, error) {
m.loginCalls++
return "new-token", nil
}
func (m *testingDSMock) CreateSession(_ context.Context, _ *auth.RequestAuth, _ int) (string, error) {
m.createSessionCalls++
return "session-id", nil
}
func (m *testingDSMock) GetPow(_ context.Context, _ *auth.RequestAuth, _ int) (string, error) {
m.getPowCalls++
return "", errors.New("should not call GetPow in this test")
}
func (m *testingDSMock) CallCompletion(_ context.Context, _ *auth.RequestAuth, _ map[string]any, _ string, _ int) (*http.Response, error) {
m.callCompletionCalls++
return nil, errors.New("should not call CallCompletion in this test")
}
func TestTestAccount_BatchModeOnlyCreatesSession(t *testing.T) {
t.Setenv("DS2API_CONFIG_JSON", `{"accounts":[{"email":"batch@example.com","password":"pwd","token":""}]}`)
store := config.LoadStore()
ds := &testingDSMock{}
h := &Handler{Store: store, DS: ds}
acc, ok := store.FindAccount("batch@example.com")
if !ok {
t.Fatal("expected test account")
}
result := h.testAccount(context.Background(), acc, "deepseek-chat", "")
if ok, _ := result["success"].(bool); !ok {
t.Fatalf("expected success=true, got %#v", result)
}
msg, _ := result["message"].(string)
if !strings.Contains(msg, "仅会话创建") {
t.Fatalf("expected session-only success message, got %q", msg)
}
if ds.loginCalls != 1 || ds.createSessionCalls != 1 {
t.Fatalf("unexpected Login/CreateSession calls: login=%d createSession=%d", ds.loginCalls, ds.createSessionCalls)
}
if ds.getPowCalls != 0 || ds.callCompletionCalls != 0 {
t.Fatalf("expected no completion flow calls, got getPow=%d callCompletion=%d", ds.getPowCalls, ds.callCompletionCalls)
}
updated, ok := store.FindAccount("batch@example.com")
if !ok {
t.Fatal("expected updated account")
}
if updated.Token != "new-token" {
t.Fatalf("expected refreshed token to be persisted, got %q", updated.Token)
}
if updated.TestStatus != "ok" {
t.Fatalf("expected test status ok, got %q", updated.TestStatus)
}
}

View File

@@ -49,6 +49,7 @@ func (h *Handler) configImport(w http.ResponseWriter, r *http.Request) {
next := c.Clone() next := c.Clone()
if mode == "replace" { if mode == "replace" {
next = incoming.Clone() next = incoming.Clone()
next.Accounts = normalizeAndDedupeAccounts(next.Accounts)
next.VercelSyncHash = c.VercelSyncHash next.VercelSyncHash = c.VercelSyncHash
next.VercelSyncTime = c.VercelSyncTime next.VercelSyncTime = c.VercelSyncTime
importedKeys = len(next.Keys) importedKeys = len(next.Keys)
@@ -73,17 +74,22 @@ func (h *Handler) configImport(w http.ResponseWriter, r *http.Request) {
existingAccounts := map[string]struct{}{} existingAccounts := map[string]struct{}{}
for _, acc := range next.Accounts { for _, acc := range next.Accounts {
existingAccounts[acc.Identifier()] = struct{}{} acc = normalizeAccountForStorage(acc)
key := accountDedupeKey(acc)
if key != "" {
existingAccounts[key] = struct{}{}
}
} }
for _, acc := range incoming.Accounts { for _, acc := range incoming.Accounts {
id := acc.Identifier() acc = normalizeAccountForStorage(acc)
if id == "" { key := accountDedupeKey(acc)
if key == "" {
continue continue
} }
if _, ok := existingAccounts[id]; ok { if _, ok := existingAccounts[key]; ok {
continue continue
} }
existingAccounts[id] = struct{}{} existingAccounts[key] = struct{}{}
next.Accounts = append(next.Accounts, acc) next.Accounts = append(next.Accounts, acc)
importedAccounts++ importedAccounts++
} }

View File

@@ -25,17 +25,28 @@ func (h *Handler) updateConfig(w http.ResponseWriter, r *http.Request) {
if accountsRaw, ok := req["accounts"].([]any); ok { if accountsRaw, ok := req["accounts"].([]any); ok {
existing := map[string]config.Account{} existing := map[string]config.Account{}
for _, a := range old.Accounts { for _, a := range old.Accounts {
existing[a.Identifier()] = a a = normalizeAccountForStorage(a)
key := accountDedupeKey(a)
if key != "" {
existing[key] = a
}
} }
seen := map[string]struct{}{}
accounts := make([]config.Account, 0, len(accountsRaw)) accounts := make([]config.Account, 0, len(accountsRaw))
for _, item := range accountsRaw { for _, item := range accountsRaw {
m, ok := item.(map[string]any) m, ok := item.(map[string]any)
if !ok { if !ok {
continue continue
} }
acc := toAccount(m) acc := normalizeAccountForStorage(toAccount(m))
id := acc.Identifier() key := accountDedupeKey(acc)
if prev, ok := existing[id]; ok { if key == "" {
continue
}
if _, ok := seen[key]; ok {
continue
}
if prev, ok := existing[key]; ok {
if strings.TrimSpace(acc.Password) == "" { if strings.TrimSpace(acc.Password) == "" {
acc.Password = prev.Password acc.Password = prev.Password
} }
@@ -43,6 +54,7 @@ func (h *Handler) updateConfig(w http.ResponseWriter, r *http.Request) {
acc.Token = prev.Token acc.Token = prev.Token
} }
} }
seen[key] = struct{}{}
accounts = append(accounts, acc) accounts = append(accounts, acc)
} }
c.Accounts = accounts c.Accounts = accounts
@@ -138,20 +150,24 @@ func (h *Handler) batchImport(w http.ResponseWriter, r *http.Request) {
if accounts, ok := req["accounts"].([]any); ok { if accounts, ok := req["accounts"].([]any); ok {
existing := map[string]bool{} existing := map[string]bool{}
for _, a := range c.Accounts { for _, a := range c.Accounts {
existing[a.Identifier()] = true a = normalizeAccountForStorage(a)
key := accountDedupeKey(a)
if key != "" {
existing[key] = true
}
} }
for _, item := range accounts { for _, item := range accounts {
m, ok := item.(map[string]any) m, ok := item.(map[string]any)
if !ok { if !ok {
continue continue
} }
acc := toAccount(m) acc := normalizeAccountForStorage(toAccount(m))
id := acc.Identifier() key := accountDedupeKey(acc)
if id == "" || existing[id] { if key == "" || existing[key] {
continue continue
} }
c.Accounts = append(c.Accounts, acc) c.Accounts = append(c.Accounts, acc)
existing[id] = true existing[key] = true
importedAccounts++ importedAccounts++
} }
} }

View File

@@ -265,3 +265,57 @@ func TestConfigImportRejectsMergedRuntimeConflict(t *testing.T) {
t.Fatalf("runtime should remain unchanged, runtime=%+v", snap.Runtime) t.Fatalf("runtime should remain unchanged, runtime=%+v", snap.Runtime)
} }
} }
// TestConfigImportMergeDedupesMobileAliases verifies that merge-mode config
// import treats "+8613800138000" and "13800138000" as aliases of the same
// account: after merging, only the original entry remains in the store.
func TestConfigImportMergeDedupesMobileAliases(t *testing.T) {
	h := newAdminTestHandler(t, `{
		"keys":["k1"],
		"accounts":[{"mobile":"+8613800138000","password":"p1"}]
	}`)
	merge := map[string]any{
		"mode": "merge",
		"config": map[string]any{
			"accounts": []any{
				map[string]any{"mobile": "13800138000", "password": "p2"},
			},
		},
	}
	b, _ := json.Marshal(merge)
	req := httptest.NewRequest(http.MethodPost, "/admin/config/import?mode=merge", bytes.NewReader(b))
	rec := httptest.NewRecorder()
	h.configImport(rec, req)
	if rec.Code != http.StatusOK {
		t.Fatalf("status=%d body=%s", rec.Code, rec.Body.String())
	}
	if got := len(h.Store.Accounts()); got != 1 {
		t.Fatalf("expected merge dedupe by canonical mobile, got=%d", got)
	}
}
// TestUpdateConfigDedupesMobileAliases verifies that POST /admin/config
// collapses "+8613800138000" and "13800138000" into a single stored account
// and that the stored identifier is the canonical +86 form.
func TestUpdateConfigDedupesMobileAliases(t *testing.T) {
	h := newAdminTestHandler(t, `{
		"keys":["k1"],
		"accounts":[{"mobile":"+8613800138000","password":"old"}]
	}`)
	reqBody := map[string]any{
		"accounts": []any{
			map[string]any{"mobile": "+8613800138000"},
			map[string]any{"mobile": "13800138000"},
		},
	}
	b, _ := json.Marshal(reqBody)
	req := httptest.NewRequest(http.MethodPost, "/admin/config", bytes.NewReader(b))
	rec := httptest.NewRecorder()
	h.updateConfig(rec, req)
	if rec.Code != http.StatusOK {
		t.Fatalf("status=%d body=%s", rec.Code, rec.Body.String())
	}
	accounts := h.Store.Accounts()
	if len(accounts) != 1 {
		t.Fatalf("expected update dedupe by canonical mobile, got=%d", len(accounts))
	}
	if accounts[0].Identifier() != "+8613800138000" {
		t.Fatalf("unexpected identifier: %q", accounts[0].Identifier())
	}
}

View File

@@ -59,9 +59,11 @@ func toStringSlice(v any) ([]string, bool) {
} }
func toAccount(m map[string]any) config.Account { func toAccount(m map[string]any) config.Account {
email := fieldString(m, "email")
mobile := config.NormalizeMobileForStorage(fieldString(m, "mobile"))
return config.Account{ return config.Account{
Email: fieldString(m, "email"), Email: email,
Mobile: fieldString(m, "mobile"), Mobile: mobile,
Password: fieldString(m, "password"), Password: fieldString(m, "password"),
Token: fieldString(m, "token"), Token: fieldString(m, "token"),
} }
@@ -90,12 +92,52 @@ func accountMatchesIdentifier(acc config.Account, identifier string) bool {
if strings.TrimSpace(acc.Email) == id { if strings.TrimSpace(acc.Email) == id {
return true return true
} }
if strings.TrimSpace(acc.Mobile) == id { if mobileKey := config.CanonicalMobileKey(id); mobileKey != "" && mobileKey == config.CanonicalMobileKey(acc.Mobile) {
return true return true
} }
return acc.Identifier() == id return acc.Identifier() == id
} }
// normalizeAccountForStorage returns a copy of acc with its contact fields
// canonicalized: the email is whitespace-trimmed and the mobile is rewritten
// to the stable storage form (e.g. +86-prefixed for mainland China numbers).
// All other fields (password, token, ...) pass through untouched.
func normalizeAccountForStorage(acc config.Account) config.Account {
	normalized := acc
	normalized.Email = strings.TrimSpace(normalized.Email)
	normalized.Mobile = config.NormalizeMobileForStorage(normalized.Mobile)
	return normalized
}
// accountDedupeKey derives the uniqueness key used when merging account
// lists. The preference order mirrors Account.Identifier: trimmed email
// first, then the canonical mobile form, then whatever Identifier falls back
// to (e.g. a token-derived synthetic id). Keys are namespaced ("email:",
// "mobile:", "id:") so values from different fields can never collide.
// Returns "" when no key exists; callers skip such entries.
func accountDedupeKey(acc config.Account) string {
	candidates := []struct{ prefix, value string }{
		{"email:", strings.TrimSpace(acc.Email)},
		{"mobile:", config.CanonicalMobileKey(acc.Mobile)},
		{"id:", strings.TrimSpace(acc.Identifier())},
	}
	for _, c := range candidates {
		if c.value != "" {
			return c.prefix + c.value
		}
	}
	return ""
}
// normalizeAndDedupeAccounts canonicalizes every account and drops entries
// that are either unkeyable (no email/mobile/identifier) or duplicates of an
// earlier entry, preserving first-wins order. Returns nil for empty input.
func normalizeAndDedupeAccounts(accounts []config.Account) []config.Account {
	if len(accounts) == 0 {
		return nil
	}
	deduped := make([]config.Account, 0, len(accounts))
	taken := make(map[string]struct{}, len(accounts))
	for _, raw := range accounts {
		candidate := normalizeAccountForStorage(raw)
		key := accountDedupeKey(candidate)
		// Skip entries with no usable key and any key seen before.
		if _, dup := taken[key]; key == "" || dup {
			continue
		}
		taken[key] = struct{}{}
		deduped = append(deduped, candidate)
	}
	return deduped
}
func findAccountByIdentifier(store ConfigStore, identifier string) (config.Account, bool) { func findAccountByIdentifier(store ConfigStore, identifier string) (config.Account, bool) {
id := strings.TrimSpace(identifier) id := strings.TrimSpace(identifier)
if id == "" { if id == "" {

View File

@@ -182,7 +182,7 @@ func TestToAccountAllFields(t *testing.T) {
if acc.Email != "user@test.com" { if acc.Email != "user@test.com" {
t.Fatalf("unexpected email: %q", acc.Email) t.Fatalf("unexpected email: %q", acc.Email)
} }
if acc.Mobile != "13800138000" { if acc.Mobile != "+8613800138000" {
t.Fatalf("unexpected mobile: %q", acc.Mobile) t.Fatalf("unexpected mobile: %q", acc.Mobile)
} }
if acc.Password != "secret" { if acc.Password != "secret" {

View File

@@ -5,6 +5,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"strings"
"testing" "testing"
"ds2api/internal/sse" "ds2api/internal/sse"
@@ -67,6 +68,7 @@ func TestGoCompatToolcallFixtures(t *testing.T) {
var fixture struct { var fixture struct {
Text string `json:"text"` Text string `json:"text"`
ToolNames []string `json:"tool_names"` ToolNames []string `json:"tool_names"`
Mode string `json:"mode"`
} }
mustLoadJSON(t, fixturePath, &fixture) mustLoadJSON(t, fixturePath, &fixture)
@@ -75,7 +77,13 @@ func TestGoCompatToolcallFixtures(t *testing.T) {
} }
mustLoadJSON(t, expectedPath, &expected) mustLoadJSON(t, expectedPath, &expected)
got := util.ParseToolCalls(fixture.Text, fixture.ToolNames) var got []util.ParsedToolCall
switch strings.ToLower(strings.TrimSpace(fixture.Mode)) {
case "standalone":
got = util.ParseStandaloneToolCalls(fixture.Text, fixture.ToolNames)
default:
got = util.ParseToolCalls(fixture.Text, fixture.ToolNames)
}
if len(got) == 0 && len(expected.Calls) == 0 { if len(got) == 0 && len(expected.Calls) == 0 {
continue continue
} }

View File

@@ -10,8 +10,8 @@ func (a Account) Identifier() string {
if strings.TrimSpace(a.Email) != "" { if strings.TrimSpace(a.Email) != "" {
return strings.TrimSpace(a.Email) return strings.TrimSpace(a.Email)
} }
if strings.TrimSpace(a.Mobile) != "" { if mobile := NormalizeMobileForStorage(a.Mobile); mobile != "" {
return strings.TrimSpace(a.Mobile) return mobile
} }
// Backward compatibility: old configs may contain token-only accounts. // Backward compatibility: old configs may contain token-only accounts.
// Use a stable non-sensitive synthetic id so they can still join the pool. // Use a stable non-sensitive synthetic id so they can still join the pool.

View File

@@ -202,7 +202,7 @@ func TestConfigCloneNilMaps(t *testing.T) {
func TestAccountIdentifierPreferenceMobileOverToken(t *testing.T) { func TestAccountIdentifierPreferenceMobileOverToken(t *testing.T) {
acc := Account{Mobile: "13800138000", Token: "tok"} acc := Account{Mobile: "13800138000", Token: "tok"}
if acc.Identifier() != "13800138000" { if acc.Identifier() != "+8613800138000" {
t.Fatalf("expected mobile identifier, got %q", acc.Identifier()) t.Fatalf("expected mobile identifier, got %q", acc.Identifier())
} }
} }

82
internal/config/mobile.go Normal file
View File

@@ -0,0 +1,82 @@
package config
import "strings"
// NormalizeMobileForStorage normalizes user input to a stable storage format.
// It keeps existing country codes (anything entered with a leading "+") and
// auto-prefixes mainland China numbers with +86; any other number without a
// leading "+" gets one prepended so the stored form always carries a code.
// Returns "" when the input contains no digits at all.
func NormalizeMobileForStorage(raw string) string {
	digits, hadPlus := extractMobileDigits(raw)
	switch {
	case digits == "":
		return ""
	case hadPlus:
		// Caller already supplied an explicit country code; trust it.
		return "+" + digits
	case isChinaMobileWithCountryCode(digits):
		// "86" + 11 national digits: rewrite as the canonical +86 form.
		return "+86" + digits[2:]
	case isChinaMainlandMobileDigits(digits):
		// Bare mainland number: assume China and prefix +86.
		return "+86" + digits
	default:
		// For non-China numbers without a leading +, preserve semantics by adding it.
		return "+" + digits
	}
}
// CanonicalMobileKey returns the comparison key used by dedupe/matching logic.
// It is currently identical to the storage normalization, so two aliases of
// the same number (e.g. "13800138000" and "+86 138 0013 8000") map to one key.
func CanonicalMobileKey(raw string) string {
	return NormalizeMobileForStorage(raw)
}
// extractMobileDigits splits raw into its ASCII digit characters plus a flag
// reporting whether the number carried an explicit "+" country-code marker.
//
// The "+" only counts when it is the first character after any leading
// separators (spaces, dashes, parentheses, ...); once a digit or any other
// character is seen, the search for "+" stops. Digits anywhere in the input
// are collected regardless. Returns ("", false) for blank input.
//
// This replaces the previous goto-based scan with an equivalent, idiomatic
// loop + break; the observable behavior is unchanged.
func extractMobileDigits(raw string) (digits string, hasPlus bool) {
	s := strings.TrimSpace(raw)
	if s == "" {
		return "", false
	}
	// Inspect only the first non-separator rune: a leading "+" marks an
	// explicit country code; anything else ends the search.
	for _, r := range s {
		if isMobileSeparator(r) {
			continue
		}
		if r == '+' {
			hasPlus = true
		}
		break
	}
	var b strings.Builder
	b.Grow(len(s))
	for _, r := range s {
		if r >= '0' && r <= '9' {
			b.WriteRune(r)
		}
	}
	return b.String(), hasPlus
}
// isChinaMainlandMobileDigits reports whether digits looks like a bare
// mainland-China mobile number: exactly 11 digits beginning 13 through 19.
func isChinaMainlandMobileDigits(digits string) bool {
	return len(digits) == 11 &&
		digits[0] == '1' &&
		digits[1] >= '3' && digits[1] <= '9'
}
// isChinaMobileWithCountryCode reports whether digits is a mainland-China
// mobile number written with the bare "86" country code (13 digits total:
// "86" followed by a valid 11-digit national number).
func isChinaMobileWithCountryCode(digits string) bool {
	const countryCode = "86"
	if !strings.HasPrefix(digits, countryCode) || len(digits) != len(countryCode)+11 {
		return false
	}
	return isChinaMainlandMobileDigits(digits[len(countryCode):])
}
// isMobileSeparator reports whether r is whitespace or punctuation commonly
// used to format phone numbers, and therefore ignored when extracting digits.
func isMobileSeparator(r rune) bool {
	for _, sep := range []rune{' ', '\t', '\n', '\r', '-', '(', ')', '.', '/'} {
		if r == sep {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,36 @@
package config
import "testing"
// Bare 11-digit mainland-China numbers gain the +86 prefix on storage.
func TestNormalizeMobileForStorageChinaMainlandAddsPlus86(t *testing.T) {
	if got := NormalizeMobileForStorage("13800138000"); got != "+8613800138000" {
		t.Fatalf("got %q", got)
	}
}
// A bare "86" country code is rewritten to the canonical "+86" form.
func TestNormalizeMobileForStorageChinaWithCountryCode(t *testing.T) {
	if got := NormalizeMobileForStorage("8613800138000"); got != "+8613800138000" {
		t.Fatalf("got %q", got)
	}
}
// Existing non-China country codes are preserved; separators and surrounding
// whitespace are stripped.
func TestNormalizeMobileForStorageKeepsExistingCountryCode(t *testing.T) {
	if got := NormalizeMobileForStorage(" +1 (415) 555-2671 "); got != "+14155552671" {
		t.Fatalf("got %q", got)
	}
}
// All common aliases of one China number collapse to a single canonical key.
func TestCanonicalMobileKeyMatchesChinaAliases(t *testing.T) {
	a := CanonicalMobileKey("+8613800138000")
	b := CanonicalMobileKey("13800138000")
	c := CanonicalMobileKey("86 13800138000")
	if a == "" || a != b || b != c {
		t.Fatalf("alias mismatch: a=%q b=%q c=%q", a, b, c)
	}
}
// Input with no digits yields an empty key (such entries are skipped by dedupe).
func TestCanonicalMobileKeyEmptyForInvalidInput(t *testing.T) {
	if got := CanonicalMobileKey("() --"); got != "" {
		t.Fatalf("got %q", got)
	}
}

View File

@@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"strings" "strings"
"unicode"
"ds2api/internal/auth" "ds2api/internal/auth"
"ds2api/internal/config" "ds2api/internal/config"
@@ -20,8 +21,9 @@ func (c *Client) Login(ctx context.Context, acc config.Account) (string, error)
if email := strings.TrimSpace(acc.Email); email != "" { if email := strings.TrimSpace(acc.Email); email != "" {
payload["email"] = email payload["email"] = email
} else if mobile := strings.TrimSpace(acc.Mobile); mobile != "" { } else if mobile := strings.TrimSpace(acc.Mobile); mobile != "" {
payload["mobile"] = mobile loginMobile, areaCode := normalizeMobileForLogin(mobile)
payload["area_code"] = nil payload["mobile"] = loginMobile
payload["area_code"] = areaCode
} else { } else {
return "", errors.New("missing email/mobile") return "", errors.New("missing email/mobile")
} }
@@ -151,3 +153,26 @@ func isTokenInvalid(status int, code int, msg string) bool {
} }
return strings.Contains(msg, "token") || strings.Contains(msg, "unauthorized") return strings.Contains(msg, "token") || strings.Contains(msg, "unauthorized")
} }
// normalizeMobileForLogin converts a stored/user-entered mobile value into
// the shape the DeepSeek login API expects: digits only, with a mainland
// China "86" country code stripped (the API wants the bare national number).
// areaCode is always nil today; the second return value exists so callers
// can forward it verbatim into the JSON payload.
//
// Note: the original condition `(hasPlus || HasPrefix(digits,"86")) &&
// HasPrefix(digits,"86") && len==13` was redundant — the first clause is
// implied by the second — so the unused hasPlus computation is removed.
// Behavior is unchanged.
func normalizeMobileForLogin(raw string) (mobile string, areaCode any) {
	s := strings.TrimSpace(raw)
	if s == "" {
		return "", nil
	}
	var b strings.Builder
	b.Grow(len(s))
	for _, r := range s {
		if unicode.IsDigit(r) {
			b.WriteRune(r)
		}
	}
	digits := b.String()
	if digits == "" {
		return "", nil
	}
	// "86" + 11 digits means a China number with country code: strip it.
	if strings.HasPrefix(digits, "86") && len(digits) == 13 {
		return digits[2:], nil
	}
	return digits, nil
}

View File

@@ -0,0 +1,33 @@
package deepseek
import "testing"
// "+86"-prefixed numbers are sent as the bare national number, nil area code.
func TestNormalizeMobileForLogin_ChinaWithPlus86(t *testing.T) {
	mobile, areaCode := normalizeMobileForLogin("+8613800138000")
	if mobile != "13800138000" {
		t.Fatalf("unexpected mobile: %q", mobile)
	}
	if areaCode != nil {
		t.Fatalf("expected nil areaCode, got %#v", areaCode)
	}
}
// A bare "86" country code is also stripped for the login payload.
func TestNormalizeMobileForLogin_ChinaWith86Prefix(t *testing.T) {
	mobile, areaCode := normalizeMobileForLogin("8613800138000")
	if mobile != "13800138000" {
		t.Fatalf("unexpected mobile: %q", mobile)
	}
	if areaCode != nil {
		t.Fatalf("expected nil areaCode, got %#v", areaCode)
	}
}
// Plain 11-digit national numbers pass through unchanged.
func TestNormalizeMobileForLogin_KeepPlainDigits(t *testing.T) {
	mobile, areaCode := normalizeMobileForLogin("13800138000")
	if mobile != "13800138000" {
		t.Fatalf("unexpected mobile: %q", mobile)
	}
	if areaCode != nil {
		t.Fatalf("expected nil areaCode, got %#v", areaCode)
	}
}

View File

@@ -8,7 +8,7 @@ import (
) )
func BuildChatCompletion(completionID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any { func BuildChatCompletion(completionID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
detected := util.ParseToolCalls(finalText, toolNames) detected := util.ParseStandaloneToolCalls(finalText, toolNames)
finishReason := "stop" finishReason := "stop"
messageObj := map[string]any{"role": "assistant", "content": finalText} messageObj := map[string]any{"role": "assistant", "content": finalText}
if strings.TrimSpace(finalThinking) != "" { if strings.TrimSpace(finalThinking) != "" {

View File

@@ -11,12 +11,9 @@ import (
) )
func BuildResponseObject(responseID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any { func BuildResponseObject(responseID, model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
// Align responses tool-call semantics with chat/completions: // Strict mode: only standalone, structured tool-call payloads are treated
// mixed prose + tool_call payloads should still be interpreted as tool calls. // as executable tool calls.
detected := util.ParseToolCalls(finalText, toolNames) detected := util.ParseStandaloneToolCalls(finalText, toolNames)
if len(detected) == 0 && strings.TrimSpace(finalThinking) != "" {
detected = util.ParseToolCalls(finalThinking, toolNames)
}
exposedOutputText := finalText exposedOutputText := finalText
output := make([]any, 0, 2) output := make([]any, 0, 2)
if len(detected) > 0 { if len(detected) > 0 {

View File

@@ -45,7 +45,7 @@ func TestBuildResponseObjectToolCallsFollowChatShape(t *testing.T) {
} }
} }
func TestBuildResponseObjectTreatsMixedProseToolPayloadAsToolCall(t *testing.T) { func TestBuildResponseObjectTreatsMixedProseToolPayloadAsText(t *testing.T) {
obj := BuildResponseObject( obj := BuildResponseObject(
"resp_test", "resp_test",
"gpt-4o", "gpt-4o",
@@ -56,17 +56,16 @@ func TestBuildResponseObjectTreatsMixedProseToolPayloadAsToolCall(t *testing.T)
) )
outputText, _ := obj["output_text"].(string) outputText, _ := obj["output_text"].(string)
if outputText != "" { if outputText == "" {
t.Fatalf("expected output_text hidden once tool calls are detected, got %q", outputText) t.Fatalf("expected output_text preserved for mixed prose payload")
} }
output, _ := obj["output"].([]any) output, _ := obj["output"].([]any)
if len(output) != 1 { if len(output) != 1 {
t.Fatalf("expected function_call output only, got %#v", obj["output"]) t.Fatalf("expected one message output item, got %#v", obj["output"])
} }
first, _ := output[0].(map[string]any) first, _ := output[0].(map[string]any)
if first["type"] != "function_call" { if first["type"] != "message" {
t.Fatalf("expected first output type function_call, got %#v", first["type"]) t.Fatalf("expected message output type, got %#v", first["type"])
} }
} }
@@ -127,7 +126,7 @@ func TestBuildResponseObjectReasoningOnlyFallsBackToOutputText(t *testing.T) {
} }
} }
func TestBuildResponseObjectDetectsToolCallFromThinkingChannel(t *testing.T) { func TestBuildResponseObjectIgnoresToolCallFromThinkingChannel(t *testing.T) {
obj := BuildResponseObject( obj := BuildResponseObject(
"resp_test", "resp_test",
"gpt-4o", "gpt-4o",
@@ -139,10 +138,10 @@ func TestBuildResponseObjectDetectsToolCallFromThinkingChannel(t *testing.T) {
output, _ := obj["output"].([]any) output, _ := obj["output"].([]any)
if len(output) != 1 { if len(output) != 1 {
t.Fatalf("expected function_call output only, got %#v", obj["output"]) t.Fatalf("expected one message output item, got %#v", obj["output"])
} }
first, _ := output[0].(map[string]any) first, _ := output[0].(map[string]any)
if first["type"] != "function_call" { if first["type"] != "message" {
t.Fatalf("expected output function_call, got %#v", first["type"]) t.Fatalf("expected output message, got %#v", first["type"])
} }
} }

View File

@@ -10,8 +10,10 @@ const {
} = require('./sse_parse'); } = require('./sse_parse');
const { const {
resolveToolcallPolicy, resolveToolcallPolicy,
formatIncrementalToolCallDeltas,
normalizePreparedToolNames, normalizePreparedToolNames,
boolDefaultTrue, boolDefaultTrue,
filterIncrementalToolCallDeltasByAllowed,
} = require('./toolcall_policy'); } = require('./toolcall_policy');
const { const {
estimateTokens, estimateTokens,
@@ -82,7 +84,9 @@ module.exports.__test = {
shouldSkipPath, shouldSkipPath,
asString, asString,
resolveToolcallPolicy, resolveToolcallPolicy,
formatIncrementalToolCallDeltas,
normalizePreparedToolNames, normalizePreparedToolNames,
boolDefaultTrue, boolDefaultTrue,
filterIncrementalToolCallDeltasByAllowed,
estimateTokens, estimateTokens,
}; };

View File

@@ -68,6 +68,47 @@ function formatIncrementalToolCallDeltas(deltas, idStore) {
return out; return out;
} }
// Filters streaming tool-call deltas so only calls whose tool name appears in
// allowedNames pass through. Continuation deltas (argument fragments without a
// name) are kept only when the call at the same index was previously accepted.
// Verdicts are remembered in seenNames (Map: index -> accepted name, or the
// '__blocked__' sentinel) so later fragments of a rejected call are dropped.
function filterIncrementalToolCallDeltasByAllowed(deltas, allowedNames, seenNames) {
  if (!Array.isArray(deltas) || deltas.length === 0) {
    return [];
  }
  const seen = seenNames instanceof Map ? seenNames : new Map();
  const allowed = new Set((allowedNames || []).filter((name) => asString(name) !== ''));
  if (allowed.size === 0) {
    // Nothing is allowed: mark every named call blocked, emit nothing.
    for (const delta of deltas) {
      if (delta && typeof delta === 'object' && asString(delta.name)) {
        seen.set(Number.isInteger(delta.index) ? delta.index : 0, '__blocked__');
      }
    }
    return [];
  }
  const kept = [];
  for (const delta of deltas) {
    if (!delta || typeof delta !== 'object') {
      continue;
    }
    const slot = Number.isInteger(delta.index) ? delta.index : 0;
    const name = asString(delta.name);
    if (name) {
      // Opening delta: record the verdict for this slot.
      const accepted = allowed.has(name);
      seen.set(slot, accepted ? name : '__blocked__');
      if (accepted) {
        kept.push(delta);
      }
      continue;
    }
    // Continuation delta: forward only if this slot was accepted earlier.
    const recorded = asString(seen.get(slot));
    if (recorded && recorded !== '__blocked__') {
      kept.push(delta);
    }
  }
  return kept;
}
function ensureStreamToolCallID(idStore, index) { function ensureStreamToolCallID(idStore, index) {
const key = Number.isInteger(index) ? index : 0; const key = Number.isInteger(index) ? index : 0;
const existing = idStore.get(key); const existing = idStore.get(key);
@@ -104,4 +145,5 @@ module.exports = {
normalizePreparedToolNames, normalizePreparedToolNames,
boolDefaultTrue, boolDefaultTrue,
formatIncrementalToolCallDeltas, formatIncrementalToolCallDeltas,
filterIncrementalToolCallDeltasByAllowed,
}; };

View File

@@ -5,7 +5,7 @@ const {
createToolSieveState, createToolSieveState,
processToolSieveChunk, processToolSieveChunk,
flushToolSieve, flushToolSieve,
parseToolCalls, parseStandaloneToolCalls,
formatOpenAIStreamToolCalls, formatOpenAIStreamToolCalls,
} = require('../helpers/stream-tool-sieve'); } = require('../helpers/stream-tool-sieve');
const { const {
@@ -24,7 +24,6 @@ const {
} = require('./token_usage'); } = require('./token_usage');
const { const {
resolveToolcallPolicy, resolveToolcallPolicy,
formatIncrementalToolCallDeltas,
} = require('./toolcall_policy'); } = require('./toolcall_policy');
const { const {
createChatCompletionEmitter, createChatCompletionEmitter,
@@ -130,7 +129,6 @@ async function handleVercelStream(req, res, rawBody, payload) {
let thinkingText = ''; let thinkingText = '';
let outputText = ''; let outputText = '';
const toolSieveEnabled = toolPolicy.toolSieveEnabled; const toolSieveEnabled = toolPolicy.toolSieveEnabled;
const emitEarlyToolDeltas = toolPolicy.emitEarlyToolDeltas;
const toolSieveState = createToolSieveState(); const toolSieveState = createToolSieveState();
let toolCallsEmitted = false; let toolCallsEmitted = false;
const streamToolCallIDs = new Map(); const streamToolCallIDs = new Map();
@@ -155,13 +153,18 @@ async function handleVercelStream(req, res, rawBody, payload) {
await releaseLease(); await releaseLease();
return; return;
} }
const detected = parseToolCalls(outputText, toolNames); const detected = parseStandaloneToolCalls(outputText, toolNames);
if (detected.length > 0 && !toolCallsEmitted) { if (detected.length > 0 && !toolCallsEmitted) {
toolCallsEmitted = true; toolCallsEmitted = true;
sendDeltaFrame({ tool_calls: formatOpenAIStreamToolCalls(detected) }); sendDeltaFrame({ tool_calls: formatOpenAIStreamToolCalls(detected, streamToolCallIDs) });
} else if (toolSieveEnabled) { } else if (toolSieveEnabled) {
const tailEvents = flushToolSieve(toolSieveState, toolNames); const tailEvents = flushToolSieve(toolSieveState, toolNames);
for (const evt of tailEvents) { for (const evt of tailEvents) {
if (evt.type === 'tool_calls' && Array.isArray(evt.calls) && evt.calls.length > 0) {
toolCallsEmitted = true;
sendDeltaFrame({ tool_calls: formatOpenAIStreamToolCalls(evt.calls, streamToolCallIDs) });
continue;
}
if (evt.text) { if (evt.text) {
sendDeltaFrame({ content: evt.text }); sendDeltaFrame({ content: evt.text });
} }
@@ -252,17 +255,9 @@ async function handleVercelStream(req, res, rawBody, payload) {
} }
const events = processToolSieveChunk(toolSieveState, p.text, toolNames); const events = processToolSieveChunk(toolSieveState, p.text, toolNames);
for (const evt of events) { for (const evt of events) {
if (evt.type === 'tool_call_deltas' && Array.isArray(evt.deltas) && evt.deltas.length > 0) {
if (!emitEarlyToolDeltas) {
continue;
}
toolCallsEmitted = true;
sendDeltaFrame({ tool_calls: formatIncrementalToolCallDeltas(evt.deltas, streamToolCallIDs) });
continue;
}
if (evt.type === 'tool_calls') { if (evt.type === 'tool_calls') {
toolCallsEmitted = true; toolCallsEmitted = true;
sendDeltaFrame({ tool_calls: formatOpenAIStreamToolCalls(evt.calls) }); sendDeltaFrame({ tool_calls: formatOpenAIStreamToolCalls(evt.calls, streamToolCallIDs) });
continue; continue;
} }
if (evt.text) { if (evt.text) {

View File

@@ -2,13 +2,13 @@
const crypto = require('crypto'); const crypto = require('crypto');
function formatOpenAIStreamToolCalls(calls) { function formatOpenAIStreamToolCalls(calls, idStore) {
if (!Array.isArray(calls) || calls.length === 0) { if (!Array.isArray(calls) || calls.length === 0) {
return []; return [];
} }
return calls.map((c, idx) => ({ return calls.map((c, idx) => ({
index: idx, index: idx,
id: `call_${newCallID()}`, id: ensureStreamToolCallID(idStore, idx),
type: 'function', type: 'function',
function: { function: {
name: c.name, name: c.name,
@@ -17,6 +17,20 @@ function formatOpenAIStreamToolCalls(calls) {
})); }));
} }
// Returns a stable OpenAI-style tool-call id for the given stream index.
// When idStore is a Map, ids are cached per index so repeated frames for the
// same call reuse one id; otherwise a fresh id is minted on every invocation.
function ensureStreamToolCallID(idStore, index) {
  if (!(idStore instanceof Map)) {
    return `call_${newCallID()}`;
  }
  const slot = Number.isInteger(index) ? index : 0;
  let id = idStore.get(slot);
  if (!id) {
    id = `call_${newCallID()}`;
    idStore.set(slot, id);
  }
  return id;
}
function newCallID() { function newCallID() {
if (typeof crypto.randomUUID === 'function') { if (typeof crypto.randomUUID === 'function') {
return crypto.randomUUID().replace(/-/g, ''); return crypto.randomUUID().replace(/-/g, '');

View File

@@ -1,226 +0,0 @@
'use strict';
const {
looksLikeToolExampleContext,
insideCodeFence,
} = require('./state');
const {
findObjectFieldValueStart,
parseJSONStringLiteral,
skipSpaces,
} = require('./jsonscan');
// Incrementally derives OpenAI-style tool-call deltas from the text captured
// so far in the sieve state. Returns delta objects ({index, name} and/or
// {index, arguments}) and records progress on `state` so the same bytes are
// never re-emitted. Returns [] until a {"tool_calls": [...]} payload with a
// resolvable name is seen. NOTE(review): only the first call (index 0) is
// streamed incrementally — confirm multi-call payloads are handled elsewhere.
function buildIncrementalToolDeltas(state) {
  const captured = state.capture || '';
  if (!captured) {
    return [];
  }
  // Skip payloads that are merely quoted examples in surrounding prose.
  if (looksLikeToolExampleContext(state.recentTextTail)) {
    return [];
  }
  const lower = captured.toLowerCase();
  const keyIdx = lower.indexOf('tool_calls');
  if (keyIdx < 0) {
    return [];
  }
  const start = captured.slice(0, keyIdx).lastIndexOf('{');
  if (start < 0) {
    return [];
  }
  // Ignore payloads that sit inside an unterminated code fence.
  if (insideCodeFence((state.recentTextTail || '') + captured.slice(0, start))) {
    return [];
  }
  const callStart = findFirstToolCallObjectStart(captured, keyIdx);
  if (callStart < 0) {
    return [];
  }
  const deltas = [];
  // Resolve the tool name once; nothing is emitted until it is known.
  if (!state.toolName) {
    const name = extractToolCallName(captured, callStart);
    if (!name) {
      return [];
    }
    state.toolName = name;
  }
  // Locate where the arguments value begins (object/array, or JSON string —
  // in string mode, toolArgsStart points just past the opening quote).
  if (state.toolArgsStart < 0) {
    const args = findToolCallArgsStart(captured, callStart);
    if (args) {
      state.toolArgsString = Boolean(args.stringMode);
      state.toolArgsStart = state.toolArgsString ? args.start + 1 : args.start;
      state.toolArgsSent = state.toolArgsStart;
    }
  }
  // Emit the name delta only after the arguments start is known, so the
  // consumer never receives a name with no arguments to follow.
  if (!state.toolNameSent) {
    if (state.toolArgsStart < 0) {
      return [];
    }
    state.toolNameSent = true;
    deltas.push({ index: 0, name: state.toolName });
  }
  if (state.toolArgsStart < 0 || state.toolArgsDone) {
    return deltas;
  }
  // Stream any newly available argument bytes since the last emission.
  const progress = scanToolCallArgsProgress(captured, state.toolArgsStart, state.toolArgsString);
  if (!progress) {
    return deltas;
  }
  if (progress.end > state.toolArgsSent) {
    deltas.push({
      index: 0,
      arguments: captured.slice(state.toolArgsSent, progress.end),
    });
    state.toolArgsSent = progress.end;
  }
  if (progress.complete) {
    state.toolArgsDone = true;
  }
  return deltas;
}
// Returns the index of the '{' opening the first entry of the tool_calls
// array, or -1 when the array or its first object is not yet present.
function findFirstToolCallObjectStart(text, keyIdx) {
  const arrayStart = findToolCallsArrayStart(text, keyIdx);
  if (arrayStart < 0) {
    return -1;
  }
  const objStart = skipSpaces(text, arrayStart + 1);
  return objStart < text.length && text[objStart] === '{' ? objStart : -1;
}
// Returns the index of the '[' that opens the tool_calls array value,
// scanning forward from the "tool_calls" key at keyIdx; -1 when the
// ':' or '[' has not arrived yet.
function findToolCallsArrayStart(text, keyIdx) {
  const colon = text.indexOf(':', keyIdx + 'tool_calls'.length);
  if (colon < 0) {
    return -1;
  }
  const pos = skipSpaces(text, colon + 1);
  return pos < text.length && text[pos] === '[' ? pos : -1;
}
// Extracts the tool name from a single tool-call object starting at
// callStart. Looks for a top-level "name" field first, then falls back to
// the nested {"function": {"name": ...}} shape used by OpenAI payloads.
// Returns '' when no string-valued name is found yet.
function extractToolCallName(text, callStart) {
  let valueStart = findObjectFieldValueStart(text, callStart, ['name']);
  if (valueStart < 0 || text[valueStart] !== '"') {
    // Fall back to the nested "function" object.
    const fnStart = findFunctionObjectStart(text, callStart);
    if (fnStart < 0) {
      return '';
    }
    valueStart = findObjectFieldValueStart(text, fnStart, ['name']);
    if (valueStart < 0 || text[valueStart] !== '"') {
      return '';
    }
  }
  const parsed = parseJSONStringLiteral(text, valueStart);
  if (!parsed) {
    return '';
  }
  return parsed.value;
}
// Locates where the arguments value of a tool call begins. Accepts any of
// the common argument field names, either at the top level of the call
// object or nested under "function". Returns { start, stringMode } where
// stringMode is true when the arguments are a JSON string literal rather
// than an object/array; returns null when no arguments value is found yet.
function findToolCallArgsStart(text, callStart) {
  const keys = ['input', 'arguments', 'args', 'parameters', 'params'];
  let valueStart = findObjectFieldValueStart(text, callStart, keys);
  if (valueStart < 0) {
    // Fall back to the OpenAI shape: {"function": {"arguments": ...}}.
    const fnStart = findFunctionObjectStart(text, callStart);
    if (fnStart < 0) {
      return null;
    }
    valueStart = findObjectFieldValueStart(text, fnStart, keys);
    if (valueStart < 0) {
      return null;
    }
  }
  if (valueStart >= text.length) {
    return null;
  }
  const ch = text[valueStart];
  if (ch === '{' || ch === '[') {
    return { start: valueStart, stringMode: false };
  }
  if (ch === '"') {
    return { start: valueStart, stringMode: true };
  }
  return null;
}
// Measures how much of the arguments value starting at `start` is available
// in `text`. In stringMode it scans a JSON string literal (start points just
// past the opening quote), honoring backslash escapes; otherwise it
// brace/bracket-matches an object or array while skipping quoted sections.
// Returns { end, complete } where `end` is the exclusive offset of
// consumable bytes and `complete` reports whether the value is fully closed;
// returns null for an invalid start position.
function scanToolCallArgsProgress(text, start, stringMode) {
  if (start < 0 || start > text.length) {
    return null;
  }
  if (stringMode) {
    let escaped = false;
    for (let i = start; i < text.length; i += 1) {
      const ch = text[i];
      if (escaped) {
        escaped = false;
        continue;
      }
      if (ch === '\\') {
        escaped = true;
        continue;
      }
      if (ch === '"') {
        // Closing quote is excluded from the emitted argument bytes.
        return { end: i, complete: true };
      }
    }
    return { end: text.length, complete: false };
  }
  if (start >= text.length || (text[start] !== '{' && text[start] !== '[')) {
    return null;
  }
  let depth = 0;
  let quote = '';
  let escaped = false;
  for (let i = start; i < text.length; i += 1) {
    const ch = text[i];
    if (quote) {
      // Inside a quoted section: only look for the matching close quote.
      if (escaped) {
        escaped = false;
        continue;
      }
      if (ch === '\\') {
        escaped = true;
        continue;
      }
      if (ch === quote) {
        quote = '';
      }
      continue;
    }
    if (ch === '"' || ch === "'") {
      quote = ch;
      continue;
    }
    if (ch === '{' || ch === '[') {
      depth += 1;
      continue;
    }
    if (ch === '}' || ch === ']') {
      depth -= 1;
      if (depth === 0) {
        // Closing bracket is included in the emitted bytes.
        return { end: i + 1, complete: true };
      }
    }
  }
  return { end: text.length, complete: false };
}
// Returns the index of the '{' opening the nested "function" object inside a
// tool-call object, or -1 when that field is absent or not an object.
function findFunctionObjectStart(text, callStart) {
  const valueStart = findObjectFieldValueStart(text, callStart, ['function']);
  const isObject = valueStart >= 0 && valueStart < text.length && text[valueStart] === '{';
  return isObject ? valueStart : -1;
}
module.exports = {
buildIncrementalToolDeltas,
};

View File

@@ -10,7 +10,9 @@ const {
const { const {
extractToolNames, extractToolNames,
parseToolCalls, parseToolCalls,
parseToolCallsDetailed,
parseStandaloneToolCalls, parseStandaloneToolCalls,
parseStandaloneToolCallsDetailed,
} = require('./parse'); } = require('./parse');
const { const {
formatOpenAIStreamToolCalls, formatOpenAIStreamToolCalls,
@@ -22,6 +24,8 @@ module.exports = {
processToolSieveChunk, processToolSieveChunk,
flushToolSieve, flushToolSieve,
parseToolCalls, parseToolCalls,
parseToolCallsDetailed,
parseStandaloneToolCalls, parseStandaloneToolCalls,
parseStandaloneToolCallsDetailed,
formatOpenAIStreamToolCalls, formatOpenAIStreamToolCalls,
}; };

View File

@@ -1,14 +1,14 @@
'use strict'; 'use strict';
const TOOL_CALL_PATTERN = /\{\s*["']tool_calls["']\s*:\s*\[(.*?)\]\s*\}/s;
const { const {
toStringSafe, toStringSafe,
looksLikeToolExampleContext, looksLikeToolExampleContext,
} = require('./state'); } = require('./state');
const { const {
extractJSONObjectFrom, stripFencedCodeBlocks,
} = require('./jsonscan'); buildToolCallCandidates,
parseToolCallsPayload,
} = require('./parse_payload');
function extractToolNames(tools) { function extractToolNames(tools) {
if (!Array.isArray(tools) || tools.length === 0) { if (!Array.isArray(tools) || tools.length === 0) {
@@ -29,245 +29,144 @@ function extractToolNames(tools) {
} }
function parseToolCalls(text, toolNames) { function parseToolCalls(text, toolNames) {
return parseToolCallsDetailed(text, toolNames).calls;
}
function parseToolCallsDetailed(text, toolNames) {
const result = emptyParseResult();
if (!toStringSafe(text)) { if (!toStringSafe(text)) {
return []; return result;
} }
const sanitized = stripFencedCodeBlocks(text); const sanitized = stripFencedCodeBlocks(text);
if (!toStringSafe(sanitized)) { if (!toStringSafe(sanitized)) {
return []; return result;
} }
result.sawToolCallSyntax = sanitized.toLowerCase().includes('tool_calls');
const candidates = buildToolCallCandidates(sanitized); const candidates = buildToolCallCandidates(sanitized);
let parsed = []; let parsed = [];
for (const c of candidates) { for (const c of candidates) {
parsed = parseToolCallsPayload(c); parsed = parseToolCallsPayload(c);
if (parsed.length > 0) { if (parsed.length > 0) {
result.sawToolCallSyntax = true;
break; break;
} }
} }
if (parsed.length === 0) { if (parsed.length === 0) {
return []; return result;
} }
return filterToolCalls(parsed, toolNames);
}
function stripFencedCodeBlocks(text) { const filtered = filterToolCallsDetailed(parsed, toolNames);
const t = typeof text === 'string' ? text : ''; result.calls = filtered.calls;
if (!t) { result.rejectedToolNames = filtered.rejectedToolNames;
return ''; result.rejectedByPolicy = filtered.rejectedToolNames.length > 0 && filtered.calls.length === 0;
} return result;
return t.replace(/```[\s\S]*?```/g, ' ');
} }
function parseStandaloneToolCalls(text, toolNames) { function parseStandaloneToolCalls(text, toolNames) {
return parseStandaloneToolCallsDetailed(text, toolNames).calls;
}
function parseStandaloneToolCallsDetailed(text, toolNames) {
const result = emptyParseResult();
const trimmed = toStringSafe(text); const trimmed = toStringSafe(text);
if (!trimmed) { if (!trimmed) {
return []; return result;
}
if ((trimmed.startsWith('```') && trimmed.endsWith('```')) || trimmed.includes('```')) {
return [];
} }
if (looksLikeToolExampleContext(trimmed)) { if (looksLikeToolExampleContext(trimmed)) {
return []; return result;
} }
const candidates = [trimmed]; result.sawToolCallSyntax = trimmed.toLowerCase().includes('tool_calls');
if (trimmed.startsWith('```') && trimmed.endsWith('```')) { if (!trimmed.startsWith('{') && !trimmed.startsWith('[')) {
const m = trimmed.match(/```(?:json)?\s*([\s\S]*?)\s*```/i); return result;
if (m && m[1]) {
candidates.push(toStringSafe(m[1]));
}
} }
for (const candidate of candidates) {
const c = toStringSafe(candidate); const parsed = parseToolCallsPayload(trimmed);
if (!c) { if (parsed.length === 0) {
continue; return result;
}
if (!c.startsWith('{') && !c.startsWith('[')) {
continue;
}
const parsed = parseToolCallsPayload(c);
if (parsed.length > 0) {
return filterToolCalls(parsed, toolNames);
}
} }
return [];
result.sawToolCallSyntax = true;
const filtered = filterToolCallsDetailed(parsed, toolNames);
result.calls = filtered.calls;
result.rejectedToolNames = filtered.rejectedToolNames;
result.rejectedByPolicy = filtered.rejectedToolNames.length > 0 && filtered.calls.length === 0;
return result;
} }
function buildToolCallCandidates(text) { function emptyParseResult() {
const trimmed = toStringSafe(text);
const candidates = [trimmed];
const fenced = trimmed.match(/```(?:json)?\s*([\s\S]*?)\s*```/gi) || [];
for (const block of fenced) {
const m = block.match(/```(?:json)?\s*([\s\S]*?)\s*```/i);
if (m && m[1]) {
candidates.push(toStringSafe(m[1]));
}
}
for (const candidate of extractToolCallObjects(trimmed)) {
candidates.push(toStringSafe(candidate));
}
const first = trimmed.indexOf('{');
const last = trimmed.lastIndexOf('}');
if (first >= 0 && last > first) {
candidates.push(toStringSafe(trimmed.slice(first, last + 1)));
}
const m = trimmed.match(TOOL_CALL_PATTERN);
if (m && m[1]) {
candidates.push(`{"tool_calls":[${m[1]}]}`);
}
return [...new Set(candidates.filter(Boolean))];
}
function extractToolCallObjects(text) {
const raw = toStringSafe(text);
if (!raw) {
return [];
}
const lower = raw.toLowerCase();
const out = [];
let offset = 0;
// eslint-disable-next-line no-constant-condition
while (true) {
let idx = lower.indexOf('tool_calls', offset);
if (idx < 0) {
break;
}
let start = raw.slice(0, idx).lastIndexOf('{');
while (start >= 0) {
const obj = extractJSONObjectFrom(raw, start);
if (obj.ok) {
out.push(raw.slice(start, obj.end).trim());
offset = obj.end;
idx = -1;
break;
}
start = raw.slice(0, start).lastIndexOf('{');
}
if (idx >= 0) {
offset = idx + 'tool_calls'.length;
}
}
return out;
}
function parseToolCallsPayload(payload) {
let decoded;
try {
decoded = JSON.parse(payload);
} catch (_err) {
return [];
}
if (Array.isArray(decoded)) {
return parseToolCallList(decoded);
}
if (!decoded || typeof decoded !== 'object') {
return [];
}
if (decoded.tool_calls) {
return parseToolCallList(decoded.tool_calls);
}
const one = parseToolCallItem(decoded);
return one ? [one] : [];
}
function parseToolCallList(v) {
if (!Array.isArray(v)) {
return [];
}
const out = [];
for (const item of v) {
if (!item || typeof item !== 'object') {
continue;
}
const one = parseToolCallItem(item);
if (one) {
out.push(one);
}
}
return out;
}
function parseToolCallItem(m) {
let name = toStringSafe(m.name);
let inputRaw = m.input;
let hasInput = Object.prototype.hasOwnProperty.call(m, 'input');
const fn = m.function && typeof m.function === 'object' ? m.function : null;
if (fn) {
if (!name) {
name = toStringSafe(fn.name);
}
if (!hasInput && Object.prototype.hasOwnProperty.call(fn, 'arguments')) {
inputRaw = fn.arguments;
hasInput = true;
}
}
if (!hasInput) {
for (const k of ['arguments', 'args', 'parameters', 'params']) {
if (Object.prototype.hasOwnProperty.call(m, k)) {
inputRaw = m[k];
hasInput = true;
break;
}
}
}
if (!name) {
return null;
}
return { return {
name, calls: [],
input: parseToolCallInput(inputRaw), sawToolCallSyntax: false,
rejectedByPolicy: false,
rejectedToolNames: [],
}; };
} }
function parseToolCallInput(v) { function filterToolCallsDetailed(parsed, toolNames) {
if (v == null) { const sourceNames = Array.isArray(toolNames) ? toolNames : [];
return {}; const allowed = new Set();
} const allowedCanonical = new Map();
if (typeof v === 'string') { for (const item of sourceNames) {
const raw = toStringSafe(v); const name = toStringSafe(item);
if (!raw) { if (!name) {
return {}; continue;
} }
try { allowed.add(name);
const parsed = JSON.parse(raw); const lower = name.toLowerCase();
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) { if (!allowedCanonical.has(lower)) {
return parsed; allowedCanonical.set(lower, name);
}
return { _raw: raw };
} catch (_err) {
return { _raw: raw };
} }
} }
if (typeof v === 'object' && !Array.isArray(v)) {
return v;
}
try {
const parsed = JSON.parse(JSON.stringify(v));
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
return parsed;
}
} catch (_err) {
return {};
}
return {};
}
function filterToolCalls(parsed, toolNames) { if (allowed.size === 0) {
const allowed = new Set((toolNames || []).filter(Boolean)); const rejected = [];
const out = []; const seen = new Set();
for (const tc of parsed) {
if (!tc || !tc.name) {
continue;
}
if (seen.has(tc.name)) {
continue;
}
seen.add(tc.name);
rejected.push(tc.name);
}
return { calls: [], rejectedToolNames: rejected };
}
const calls = [];
const rejected = [];
const seenRejected = new Set();
for (const tc of parsed) { for (const tc of parsed) {
if (!tc || !tc.name) { if (!tc || !tc.name) {
continue; continue;
} }
if (allowed.size > 0 && !allowed.has(tc.name)) { let matchedName = '';
if (allowed.has(tc.name)) {
matchedName = tc.name;
} else {
matchedName = allowedCanonical.get(tc.name.toLowerCase()) || '';
}
if (!matchedName) {
if (!seenRejected.has(tc.name)) {
seenRejected.add(tc.name);
rejected.push(tc.name);
}
continue; continue;
} }
out.push({ name: tc.name, input: tc.input || {} }); calls.push({
name: matchedName,
input: tc.input && typeof tc.input === 'object' && !Array.isArray(tc.input) ? tc.input : {},
});
} }
return out; return { calls, rejectedToolNames: rejected };
} }
module.exports = { module.exports = {
extractToolNames, extractToolNames,
parseToolCalls, parseToolCalls,
parseToolCallsDetailed,
parseStandaloneToolCalls, parseStandaloneToolCalls,
parseStandaloneToolCallsDetailed,
}; };

View File

@@ -0,0 +1,196 @@
'use strict';
// Matches a single {"tool_calls":[ ... ]} object; /s lets `.` span newlines,
// and the body capture is non-greedy so the first closing `]}` wins.
const TOOL_CALL_PATTERN = /\{\s*["']tool_calls["']\s*:\s*\[(.*?)\]\s*\}/s;
const {
  toStringSafe,
} = require('./state');
const {
  extractJSONObjectFrom,
} = require('./jsonscan');
// Replace every fenced ``` code block with a single space.
// Non-string or empty input yields ''.
function stripFencedCodeBlocks(text) {
  if (typeof text !== 'string' || text === '') {
    return '';
  }
  const fenceRe = /```[\s\S]*?```/g;
  return text.replace(fenceRe, ' ');
}
// Collect candidate JSON strings that might hold tool calls, in priority order:
// the raw text, each fenced-block interior, every scanned tool_calls object,
// the outermost {...} span, and a TOOL_CALL_PATTERN reconstruction.
// Returns the de-duplicated, non-empty candidates.
function buildToolCallCandidates(text) {
  const trimmed = toStringSafe(text);
  const candidates = [trimmed];
  const fencedBlocks = trimmed.match(/```(?:json)?\s*([\s\S]*?)\s*```/gi) || [];
  for (const block of fencedBlocks) {
    const inner = block.match(/```(?:json)?\s*([\s\S]*?)\s*```/i);
    if (inner && inner[1]) {
      candidates.push(toStringSafe(inner[1]));
    }
  }
  for (const objText of extractToolCallObjects(trimmed)) {
    candidates.push(toStringSafe(objText));
  }
  const openIdx = trimmed.indexOf('{');
  const closeIdx = trimmed.lastIndexOf('}');
  if (openIdx >= 0 && closeIdx > openIdx) {
    candidates.push(toStringSafe(trimmed.slice(openIdx, closeIdx + 1)));
  }
  const patternMatch = trimmed.match(TOOL_CALL_PATTERN);
  if (patternMatch && patternMatch[1]) {
    candidates.push(`{"tool_calls":[${patternMatch[1]}]}`);
  }
  return [...new Set(candidates.filter(Boolean))];
}
// Scan raw text for JSON objects containing a "tool_calls" key.
// For each (case-insensitive) occurrence of 'tool_calls', walk backwards over
// preceding '{' positions until one parses as a complete JSON object (via
// extractJSONObjectFrom), then collect that object's text.
// Returns the array of captured object strings (trimmed), in scan order.
function extractToolCallObjects(text) {
  const raw = toStringSafe(text);
  if (!raw) {
    return [];
  }
  const lower = raw.toLowerCase();
  const out = [];
  let offset = 0;
  // eslint-disable-next-line no-constant-condition
  while (true) {
    let idx = lower.indexOf('tool_calls', offset);
    if (idx < 0) {
      break;
    }
    let start = raw.slice(0, idx).lastIndexOf('{');
    while (start >= 0) {
      const obj = extractJSONObjectFrom(raw, start);
      if (obj.ok) {
        out.push(raw.slice(start, obj.end).trim());
        // Resume scanning after the captured object; idx = -1 marks success
        // so the fallback advance below is skipped.
        offset = obj.end;
        idx = -1;
        break;
      }
      // This '{' did not open a complete object; try the previous one.
      start = raw.slice(0, start).lastIndexOf('{');
    }
    if (idx >= 0) {
      // No enclosing object parsed; skip past this keyword occurrence.
      offset = idx + 'tool_calls'.length;
    }
  }
  return out;
}
// Decode a JSON payload and extract tool calls from whichever shape it has:
// a bare array of calls, { tool_calls: [...] }, or a single call object.
// Returns [] when the payload is not JSON or holds no usable calls.
function parseToolCallsPayload(payload) {
  let decoded;
  try {
    decoded = JSON.parse(payload);
  } catch (_err) {
    return [];
  }
  if (Array.isArray(decoded)) {
    return parseToolCallList(decoded);
  }
  if (!decoded || typeof decoded !== 'object') {
    return [];
  }
  if (decoded.tool_calls) {
    return parseToolCallList(decoded.tool_calls);
  }
  const single = parseToolCallItem(decoded);
  return single ? [single] : [];
}
// Normalize a raw array of tool-call entries, dropping non-objects and
// entries that parseToolCallItem rejects.
function parseToolCallList(v) {
  if (!Array.isArray(v)) {
    return [];
  }
  return v
    .filter((entry) => entry && typeof entry === 'object')
    .map((entry) => parseToolCallItem(entry))
    .filter(Boolean);
}
// Normalize one raw tool-call entry into { name, input }, or null when no
// name can be found. Accepts OpenAI-style { function: { name, arguments } }
// as well as flat { name } with input under any of several alias keys.
function parseToolCallItem(m) {
  let name = toStringSafe(m.name);
  let rawInput = m.input;
  let hasInput = Object.prototype.hasOwnProperty.call(m, 'input');
  const fnObj = m.function && typeof m.function === 'object' ? m.function : null;
  if (fnObj) {
    name = name || toStringSafe(fnObj.name);
    if (!hasInput && Object.prototype.hasOwnProperty.call(fnObj, 'arguments')) {
      rawInput = fnObj.arguments;
      hasInput = true;
    }
  }
  if (!hasInput) {
    const aliases = ['arguments', 'args', 'parameters', 'params'];
    for (const key of aliases) {
      if (Object.prototype.hasOwnProperty.call(m, key)) {
        rawInput = m[key];
        hasInput = true;
        break;
      }
    }
  }
  if (!name) {
    return null;
  }
  return { name, input: parseToolCallInput(rawInput) };
}
// Normalize a tool-call "input"/"arguments" value into a plain object.
// Strings are JSON-decoded when possible (non-object decodes are wrapped as
// { _raw }), plain objects pass through, and anything else degrades to {}.
function parseToolCallInput(v) {
  if (v == null) {
    return {};
  }
  const isPlainObject = (x) => Boolean(x) && typeof x === 'object' && !Array.isArray(x);
  if (typeof v === 'string') {
    const raw = toStringSafe(v);
    if (!raw) {
      return {};
    }
    try {
      const decoded = JSON.parse(raw);
      return isPlainObject(decoded) ? decoded : { _raw: raw };
    } catch (_err) {
      return { _raw: raw };
    }
  }
  if (isPlainObject(v)) {
    return v;
  }
  try {
    const roundTripped = JSON.parse(JSON.stringify(v));
    if (isPlainObject(roundTripped)) {
      return roundTripped;
    }
  } catch (_err) {
    return {};
  }
  return {};
}
// Public surface of this helper module.
module.exports = {
  stripFencedCodeBlocks,
  buildToolCallCandidates,
  parseToolCallsPayload,
};

View File

@@ -1,16 +1,12 @@
'use strict'; 'use strict';
const { const {
TOOL_SIEVE_CAPTURE_LIMIT,
resetIncrementalToolState, resetIncrementalToolState,
noteText, noteText,
insideCodeFence, insideCodeFence,
} = require('./state'); } = require('./state');
const { const {
buildIncrementalToolDeltas, parseStandaloneToolCallsDetailed,
} = require('./incremental');
const {
parseStandaloneToolCalls,
} = require('./parse'); } = require('./parse');
const { const {
extractJSONObjectFrom, extractJSONObjectFrom,
@@ -24,6 +20,21 @@ function processToolSieveChunk(state, chunk, toolNames) {
state.pending += chunk; state.pending += chunk;
} }
const events = []; const events = [];
if (Array.isArray(state.pendingToolCalls) && state.pendingToolCalls.length > 0) {
const pending = state.pending || '';
if (pending.trim() !== '') {
const content = (state.pendingToolRaw || '') + pending;
state.pending = '';
state.pendingToolRaw = '';
state.pendingToolCalls = [];
noteText(state, content);
events.push({ type: 'text', text: content });
} else {
return events;
}
}
// eslint-disable-next-line no-constant-condition // eslint-disable-next-line no-constant-condition
while (true) { while (true) {
if (state.capturing) { if (state.capturing) {
@@ -31,57 +42,50 @@ function processToolSieveChunk(state, chunk, toolNames) {
state.capture += state.pending; state.capture += state.pending;
state.pending = ''; state.pending = '';
} }
const deltas = buildIncrementalToolDeltas(state);
if (deltas.length > 0) {
events.push({ type: 'tool_call_deltas', deltas });
}
const consumed = consumeToolCapture(state, toolNames); const consumed = consumeToolCapture(state, toolNames);
if (!consumed.ready) { if (!consumed.ready) {
if (state.capture.length > TOOL_SIEVE_CAPTURE_LIMIT) {
noteText(state, state.capture);
events.push({ type: 'text', text: state.capture });
state.capture = '';
state.capturing = false;
resetIncrementalToolState(state);
continue;
}
break; break;
} }
const captured = state.capture;
state.capture = ''; state.capture = '';
state.capturing = false; state.capturing = false;
resetIncrementalToolState(state); resetIncrementalToolState(state);
if (Array.isArray(consumed.calls) && consumed.calls.length > 0) {
state.pendingToolRaw = captured;
state.pendingToolCalls = consumed.calls;
continue;
}
if (consumed.prefix) { if (consumed.prefix) {
noteText(state, consumed.prefix); noteText(state, consumed.prefix);
events.push({ type: 'text', text: consumed.prefix }); events.push({ type: 'text', text: consumed.prefix });
} }
if (Array.isArray(consumed.calls) && consumed.calls.length > 0) {
events.push({ type: 'tool_calls', calls: consumed.calls });
}
if (consumed.suffix) { if (consumed.suffix) {
state.pending += consumed.suffix; state.pending += consumed.suffix;
} }
continue; continue;
} }
if (!state.pending) { const pending = state.pending || '';
if (!pending) {
break; break;
} }
const start = findToolSegmentStart(state.pending); const start = findToolSegmentStart(pending);
if (start >= 0) { if (start >= 0) {
const prefix = state.pending.slice(0, start); const prefix = pending.slice(0, start);
if (prefix) { if (prefix) {
noteText(state, prefix); noteText(state, prefix);
events.push({ type: 'text', text: prefix }); events.push({ type: 'text', text: prefix });
} }
state.capture = state.pending.slice(start);
state.pending = ''; state.pending = '';
state.capture += pending.slice(start);
state.capturing = true; state.capturing = true;
resetIncrementalToolState(state); resetIncrementalToolState(state);
continue; continue;
} }
const [safe, hold] = splitSafeContentForToolDetection(state.pending); const [safe, hold] = splitSafeContentForToolDetection(pending);
if (!safe) { if (!safe) {
break; break;
} }
@@ -97,6 +101,13 @@ function flushToolSieve(state, toolNames) {
return []; return [];
} }
const events = processToolSieveChunk(state, '', toolNames); const events = processToolSieveChunk(state, '', toolNames);
if (Array.isArray(state.pendingToolCalls) && state.pendingToolCalls.length > 0) {
events.push({ type: 'tool_calls', calls: state.pendingToolCalls });
state.pendingToolRaw = '';
state.pendingToolCalls = [];
}
if (state.capturing) { if (state.capturing) {
const consumed = consumeToolCapture(state, toolNames); const consumed = consumeToolCapture(state, toolNames);
if (consumed.ready) { if (consumed.ready) {
@@ -119,11 +130,13 @@ function flushToolSieve(state, toolNames) {
state.capturing = false; state.capturing = false;
resetIncrementalToolState(state); resetIncrementalToolState(state);
} }
if (state.pending) { if (state.pending) {
noteText(state, state.pending); noteText(state, state.pending);
events.push({ type: 'text', text: state.pending }); events.push({ type: 'text', text: state.pending });
state.pending = ''; state.pending = '';
} }
return events; return events;
} }
@@ -163,11 +176,10 @@ function findToolSegmentStart(s) {
let offset = 0; let offset = 0;
// eslint-disable-next-line no-constant-condition // eslint-disable-next-line no-constant-condition
while (true) { while (true) {
const keyRel = lower.indexOf('tool_calls', offset); const keyIdx = lower.indexOf('tool_calls', offset);
if (keyRel < 0) { if (keyIdx < 0) {
return -1; return -1;
} }
const keyIdx = keyRel;
const start = s.slice(0, keyIdx).lastIndexOf('{'); const start = s.slice(0, keyIdx).lastIndexOf('{');
const candidateStart = start >= 0 ? start : keyIdx; const candidateStart = start >= 0 ? start : keyIdx;
if (!insideCodeFence(s.slice(0, candidateStart))) { if (!insideCodeFence(s.slice(0, candidateStart))) {
@@ -178,7 +190,7 @@ function findToolSegmentStart(s) {
} }
function consumeToolCapture(state, toolNames) { function consumeToolCapture(state, toolNames) {
const captured = state.capture; const captured = state.capture || '';
if (!captured) { if (!captured) {
return { ready: false, prefix: '', calls: [], suffix: '' }; return { ready: false, prefix: '', calls: [], suffix: '' };
} }
@@ -195,8 +207,10 @@ function consumeToolCapture(state, toolNames) {
if (!obj.ok) { if (!obj.ok) {
return { ready: false, prefix: '', calls: [], suffix: '' }; return { ready: false, prefix: '', calls: [], suffix: '' };
} }
const prefixPart = captured.slice(0, start); const prefixPart = captured.slice(0, start);
const suffixPart = captured.slice(obj.end); const suffixPart = captured.slice(obj.end);
if (insideCodeFence((state.recentTextTail || '') + prefixPart)) { if (insideCodeFence((state.recentTextTail || '') + prefixPart)) {
return { return {
ready: true, ready: true,
@@ -205,18 +219,19 @@ function consumeToolCapture(state, toolNames) {
suffix: '', suffix: '',
}; };
} }
const rawParsed = parseStandaloneToolCalls(captured.slice(start, obj.end), []);
const parsed = parseStandaloneToolCalls(captured.slice(start, obj.end), toolNames); if ((state.recentTextTail || '').trim() !== '' || prefixPart.trim() !== '' || suffixPart.trim() !== '') {
if (parsed.length === 0) { return {
if (rawParsed.length > 0 && Array.isArray(toolNames) && toolNames.length > 0) { ready: true,
return { prefix: captured,
ready: true, calls: [],
prefix: prefixPart, suffix: '',
calls: [], };
suffix: suffixPart, }
};
} const parsed = parseStandaloneToolCallsDetailed(captured.slice(start, obj.end), toolNames);
if (state.toolNameSent) { if (!Array.isArray(parsed.calls) || parsed.calls.length === 0) {
if (parsed.sawToolCallSyntax && parsed.rejectedByPolicy) {
return { return {
ready: true, ready: true,
prefix: prefixPart, prefix: prefixPart,
@@ -231,26 +246,11 @@ function consumeToolCapture(state, toolNames) {
suffix: '', suffix: '',
}; };
} }
if (state.toolNameSent) {
if (parsed.length > 1) {
return {
ready: true,
prefix: prefixPart,
calls: parsed.slice(1),
suffix: suffixPart,
};
}
return {
ready: true,
prefix: prefixPart,
calls: [],
suffix: suffixPart,
};
}
return { return {
ready: true, ready: true,
prefix: prefixPart, prefix: prefixPart,
calls: parsed, calls: parsed.calls,
suffix: suffixPart, suffix: suffixPart,
}; };
} }

View File

@@ -1,6 +1,5 @@
'use strict'; 'use strict';
const TOOL_SIEVE_CAPTURE_LIMIT = 8 * 1024;
const TOOL_SIEVE_CONTEXT_TAIL_LIMIT = 256; const TOOL_SIEVE_CONTEXT_TAIL_LIMIT = 256;
function createToolSieveState() { function createToolSieveState() {
@@ -9,6 +8,9 @@ function createToolSieveState() {
capture: '', capture: '',
capturing: false, capturing: false,
recentTextTail: '', recentTextTail: '',
pendingToolRaw: '',
pendingToolCalls: [],
disableDeltas: false,
toolNameSent: false, toolNameSent: false,
toolName: '', toolName: '',
toolArgsStart: -1, toolArgsStart: -1,
@@ -19,6 +21,7 @@ function createToolSieveState() {
} }
function resetIncrementalToolState(state) { function resetIncrementalToolState(state) {
state.disableDeltas = false;
state.toolNameSent = false; state.toolNameSent = false;
state.toolName = ''; state.toolName = '';
state.toolArgsStart = -1; state.toolArgsStart = -1;
@@ -78,7 +81,6 @@ function toStringSafe(v) {
} }
module.exports = { module.exports = {
TOOL_SIEVE_CAPTURE_LIMIT,
TOOL_SIEVE_CONTEXT_TAIL_LIMIT, TOOL_SIEVE_CONTEXT_TAIL_LIMIT,
createToolSieveState, createToolSieveState,
resetIncrementalToolState, resetIncrementalToolState,

View File

@@ -57,16 +57,20 @@ func NewApp() *App {
r.Use(cors) r.Use(cors)
r.Use(timeout(0)) r.Use(timeout(0))
r.Get("/healthz", func(w http.ResponseWriter, _ *http.Request) { healthzHandler := func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(`{"status":"ok"}`)) _, _ = w.Write([]byte(`{"status":"ok"}`))
}) }
r.Get("/readyz", func(w http.ResponseWriter, _ *http.Request) { readyzHandler := func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(`{"status":"ready"}`)) _, _ = w.Write([]byte(`{"status":"ready"}`))
}) }
r.Get("/healthz", healthzHandler)
r.Head("/healthz", healthzHandler)
r.Get("/readyz", readyzHandler)
r.Head("/readyz", readyzHandler)
openai.RegisterRoutes(r, openaiHandler) openai.RegisterRoutes(r, openaiHandler)
claude.RegisterRoutes(r, claudeHandler) claude.RegisterRoutes(r, claudeHandler)
gemini.RegisterRoutes(r, geminiHandler) gemini.RegisterRoutes(r, geminiHandler)

View File

@@ -0,0 +1,20 @@
package server
import (
"net/http"
"net/http/httptest"
"testing"
)
// TestHealthEndpointsSupportHEAD verifies that the health probes /healthz and
// /readyz answer HEAD requests with 200, not only GET (load balancers and
// uptime checkers commonly probe with HEAD).
func TestHealthEndpointsSupportHEAD(t *testing.T) {
	app := NewApp()
	for _, path := range []string{"/healthz", "/readyz"} {
		req := httptest.NewRequest(http.MethodHead, path, nil)
		rec := httptest.NewRecorder()
		app.Router.ServeHTTP(rec, req)
		if rec.Code != http.StatusOK {
			t.Fatalf("expected %s HEAD status 200, got %d", path, rec.Code)
		}
	}
}

View File

@@ -17,6 +17,12 @@ func (r *Runner) caseHealthz(ctx context.Context, cc *caseContext) error {
var m map[string]any var m map[string]any
_ = json.Unmarshal(resp.Body, &m) _ = json.Unmarshal(resp.Body, &m)
cc.assert("status_ok", asString(m["status"]) == "ok", fmt.Sprintf("body=%s", string(resp.Body))) cc.assert("status_ok", asString(m["status"]) == "ok", fmt.Sprintf("body=%s", string(resp.Body)))
headResp, headErr := cc.request(ctx, requestSpec{Method: http.MethodHead, Path: "/healthz", Retryable: true})
if headErr != nil {
return headErr
}
cc.assert("head_status_200", headResp.StatusCode == http.StatusOK, fmt.Sprintf("status=%d", headResp.StatusCode))
return nil return nil
} }
@@ -29,6 +35,12 @@ func (r *Runner) caseReadyz(ctx context.Context, cc *caseContext) error {
var m map[string]any var m map[string]any
_ = json.Unmarshal(resp.Body, &m) _ = json.Unmarshal(resp.Body, &m)
cc.assert("status_ready", asString(m["status"]) == "ready", fmt.Sprintf("body=%s", string(resp.Body))) cc.assert("status_ready", asString(m["status"]) == "ready", fmt.Sprintf("body=%s", string(resp.Body)))
headResp, headErr := cc.request(ctx, requestSpec{Method: http.MethodHead, Path: "/readyz", Retryable: true})
if headErr != nil {
return headErr
}
cc.assert("head_status_200", headResp.StatusCode == http.StatusOK, fmt.Sprintf("status=%d", headResp.StatusCode))
return nil return nil
} }

View File

@@ -16,7 +16,6 @@ internal/js/helpers/stream-tool-sieve.js
internal/js/helpers/stream-tool-sieve/index.js internal/js/helpers/stream-tool-sieve/index.js
internal/js/helpers/stream-tool-sieve/state.js internal/js/helpers/stream-tool-sieve/state.js
internal/js/helpers/stream-tool-sieve/sieve.js internal/js/helpers/stream-tool-sieve/sieve.js
internal/js/helpers/stream-tool-sieve/incremental.js
internal/js/helpers/stream-tool-sieve/jsonscan.js internal/js/helpers/stream-tool-sieve/jsonscan.js
internal/js/helpers/stream-tool-sieve/parse.js internal/js/helpers/stream-tool-sieve/parse.js
internal/js/helpers/stream-tool-sieve/format.js internal/js/helpers/stream-tool-sieve/format.js

View File

@@ -105,7 +105,6 @@ internal/js/helpers/stream-tool-sieve.js
internal/js/helpers/stream-tool-sieve/index.js internal/js/helpers/stream-tool-sieve/index.js
internal/js/helpers/stream-tool-sieve/state.js internal/js/helpers/stream-tool-sieve/state.js
internal/js/helpers/stream-tool-sieve/sieve.js internal/js/helpers/stream-tool-sieve/sieve.js
internal/js/helpers/stream-tool-sieve/incremental.js
internal/js/helpers/stream-tool-sieve/jsonscan.js internal/js/helpers/stream-tool-sieve/jsonscan.js
internal/js/helpers/stream-tool-sieve/parse.js internal/js/helpers/stream-tool-sieve/parse.js
internal/js/helpers/stream-tool-sieve/format.js internal/js/helpers/stream-tool-sieve/format.js

566
start.mjs Normal file
View File

@@ -0,0 +1,566 @@
#!/usr/bin/env node
/**
* DS2API 启动脚本 - 交互式菜单
*
* 使用方法:
* node start.mjs # 显示交互式菜单
* node start.mjs dev # 开发模式(后端 + 前端热重载)
* node start.mjs prod # 生产模式(编译后运行)
* node start.mjs build # 编译后端二进制
* node start.mjs webui # 构建前端静态文件
* node start.mjs install # 安装前端依赖
* node start.mjs stop # 停止所有服务
* node start.mjs status # 查看服务状态
*/
import { spawn, execSync } from 'child_process';
import { createInterface } from 'readline';
import { existsSync } from 'fs';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Whether we are running on Windows (affects tool names and binary suffix).
const isWindows = process.platform === 'win32';
// Path of the compiled backend binary.
const BINARY = join(__dirname, isWindows ? 'ds2api.exe' : 'ds2api');
// Runtime configuration (read from env vars, kept consistent with the Go server).
const CONFIG = {
  port: process.env.PORT || '5001',
  frontendPort: 5173,
  logLevel: process.env.LOG_LEVEL || 'INFO',
  adminKey: process.env.DS2API_ADMIN_KEY || 'admin',
  webuiDir: join(__dirname, 'webui'),
  staticAdminDir: process.env.DS2API_STATIC_ADMIN_DIR || join(__dirname, 'static', 'admin'),
};
// Chinese-mainland mirrors for Go modules and the npm registry.
const MIRRORS = {
  goproxy: process.env.GOPROXY || 'https://goproxy.cn,direct',
  npm: process.env.NPM_REGISTRY || 'https://registry.npmmirror.com',
};
// Child processes spawned by this script (used by cleanup/waitForProcesses).
const processes = [];
// ANSI escape codes for colored console output.
const colors = {
  reset: '\x1b[0m',
  bright: '\x1b[1m',
  dim: '\x1b[2m',
  red: '\x1b[31m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  blue: '\x1b[34m',
  magenta: '\x1b[35m',
  cyan: '\x1b[36m',
};
// Leveled console loggers with colored prefixes.
const log = {
  info: (msg) => console.log(`${colors.cyan}[INFO]${colors.reset} ${msg}`),
  success: (msg) => console.log(`${colors.green}[OK]${colors.reset} ${msg}`),
  warn: (msg) => console.log(`${colors.yellow}[WARN]${colors.reset} ${msg}`),
  error: (msg) => console.log(`${colors.red}[ERROR]${colors.reset} ${msg}`),
  title: (msg) => console.log(`\n${colors.bright}${colors.magenta}${msg}${colors.reset}`),
};
// Gracefully terminate every tracked child process, then exit the script.
function cleanup() {
  console.log('\n');
  log.info('正在关闭所有服务...');
  for (const child of processes) {
    if (child && !child.killed) {
      child.kill('SIGTERM');
    }
  }
  log.success('已退出');
  process.exit(0);
}
// Make Ctrl+C / TERM tear down all spawned children before exiting.
process.on('SIGINT', cleanup);
process.on('SIGTERM', cleanup);
// True when `cmd` is resolvable on PATH (probed via where/which).
function commandExists(cmd) {
  const locator = isWindows ? 'where' : 'which';
  try {
    execSync(`${locator} ${cmd}`, { stdio: 'ignore' });
    return true;
  } catch {
    return false;
  }
}
// True when the `go` binary is available on PATH.
function checkGo() {
  return commandExists('go');
}
// Return the `go version` banner, or null when Go is unavailable.
function getGoVersion() {
  try {
    const banner = execSync('go version', { encoding: 'utf-8' });
    return banner.trim();
  } catch {
    return null;
  }
}
// null when webui/ is missing; otherwise whether node_modules is installed.
function checkFrontendDeps() {
  if (!existsSync(CONFIG.webuiDir)) {
    return null;
  }
  const modulesDir = join(CONFIG.webuiDir, 'node_modules');
  return existsSync(modulesDir);
}
// True when a previous webui build produced the admin index.html.
function checkWebuiBuilt() {
  const indexPage = join(CONFIG.staticAdminDir, 'index.html');
  return existsSync(indexPage);
}
// True when the compiled backend binary is present on disk.
function binaryExists() {
  return existsSync(BINARY);
}
// Find the PIDs of processes listening on `port`.
// Returns an array of PID strings, or [] when nothing is listening or the
// lookup tool fails.
// Fix: on Windows, `findstr :PORT` is a substring match — searching for
// :5001 also matched :50011 / :15001 — so we now re-check the netstat
// local-address column for an exact `:port` suffix before accepting a PID.
function findPidByPort(port) {
  try {
    if (isWindows) {
      // netstat -ano columns: Proto, Local Address, Foreign Address, State, PID
      const output = execSync(`netstat -ano | findstr :${port} | findstr LISTENING`, {
        encoding: 'utf-8',
        shell: true,
        stdio: ['pipe', 'pipe', 'ignore'],
      });
      const pids = new Set();
      for (const line of output.trim().split('\n')) {
        const parts = line.trim().split(/\s+/);
        const localAddr = parts[1] || '';
        // Exact port match, e.g. "0.0.0.0:5001" or "[::]:5001".
        if (!localAddr.endsWith(`:${port}`)) {
          continue;
        }
        const pid = parts[parts.length - 1];
        if (pid && pid !== '0') pids.add(pid);
      }
      return [...pids];
    } else {
      // lsof matches the port exactly; -t prints bare PIDs, one per line.
      const output = execSync(`lsof -ti :${port}`, {
        encoding: 'utf-8',
        stdio: ['pipe', 'pipe', 'ignore'],
      });
      return output.trim().split('\n').filter(Boolean);
    }
  } catch {
    return [];
  }
}
// Snapshot which of the configured ports currently have listeners.
function getRunningStatus() {
  const backend = findPidByPort(CONFIG.port);
  const frontend = findPidByPort(CONFIG.frontendPort);
  const isRunning = backend.length > 0 || frontend.length > 0;
  return { backend, frontend, isRunning };
}
// Stop whatever backend/frontend processes are bound to the configured ports.
// Tries a polite termination first, escalating to a forced kill if needed.
async function stopServices() {
  const status = getRunningStatus();
  if (!status.isRunning) {
    log.warn('没有检测到正在运行的服务');
    return;
  }
  log.title('========== 停止服务 ==========');
  async function terminate(pid) {
    try {
      if (isWindows) {
        try {
          execSync(`taskkill /PID ${pid}`, { stdio: 'ignore', shell: true });
        } catch {
          // Polite taskkill refused; force-kill the whole tree.
          execSync(`taskkill /F /T /PID ${pid}`, { stdio: 'ignore', shell: true });
        }
      } else {
        execSync(`kill -15 ${pid}`, { stdio: 'ignore' });
        await new Promise(resolve => setTimeout(resolve, 500));
        try {
          // kill -0 probes liveness; if still alive, escalate to SIGKILL.
          execSync(`kill -0 ${pid}`, { stdio: 'ignore' });
          execSync(`kill -9 ${pid}`, { stdio: 'ignore' });
        } catch { /* process already exited */ }
      }
    } catch { /* process may have exited already */ }
  }
  if (status.backend.length > 0) {
    log.info(`停止后端服务 (端口 ${CONFIG.port}, PID: ${status.backend.join(', ')})...`);
    for (const pid of status.backend) {
      await terminate(pid);
    }
    log.success('后端服务已停止');
  }
  if (status.frontend.length > 0) {
    log.info(`停止前端服务 (端口 ${CONFIG.frontendPort}, PID: ${status.frontend.join(', ')})...`);
    for (const pid of status.frontend) {
      await terminate(pid);
    }
    log.success('前端服务已停止');
  }
}
// Run `npm ci` inside webui/ against the configured registry mirror.
// Resolves on success; rejects when the install exits non-zero.
async function installFrontendDeps() {
  if (!existsSync(CONFIG.webuiDir)) {
    log.warn('webui 目录不存在,跳过前端依赖安装');
    return;
  }
  log.info(`安装前端依赖 (npm ci, registry: ${MIRRORS.npm})...`);
  await new Promise((resolve, reject) => {
    const child = spawn('npm', ['ci', '--registry', MIRRORS.npm], {
      cwd: CONFIG.webuiDir,
      stdio: 'inherit',
      shell: true,
    });
    child.on('close', (code) => {
      if (code === 0) {
        resolve();
      } else {
        reject(new Error('前端依赖安装失败'));
      }
    });
  });
}
// Install webui dependencies on demand (only when node_modules is absent).
async function ensureFrontendDeps() {
  const installed = checkFrontendDeps();
  if (installed === false) {
    log.warn('检测到前端依赖未安装,正在安装...');
    await installFrontendDeps();
  }
}
// Compile the Go backend into BINARY; throws when Go is not installed.
async function buildBackend() {
  if (!checkGo()) {
    throw new Error('未找到 Go请先安装 Go (https://go.dev/dl/)');
  }
  log.info(`编译后端二进制 (GOPROXY: ${MIRRORS.goproxy})...`);
  await new Promise((resolve, reject) => {
    const child = spawn('go', ['build', '-o', BINARY, './cmd/ds2api'], {
      cwd: __dirname,
      stdio: 'inherit',
      shell: true,
      env: { ...process.env, GOPROXY: MIRRORS.goproxy },
    });
    child.on('close', (code) => {
      code === 0 ? resolve() : reject(new Error('后端编译失败'));
    });
  });
}
// Build the webui static assets into the admin static directory.
async function buildWebui() {
  if (!existsSync(CONFIG.webuiDir)) {
    log.warn('webui 目录不存在');
    return;
  }
  await ensureFrontendDeps();
  log.info('构建前端静态文件...');
  await new Promise((resolve, reject) => {
    const buildArgs = ['run', 'build', '--', '--outDir', CONFIG.staticAdminDir, '--emptyOutDir'];
    const child = spawn('npm', buildArgs, {
      cwd: CONFIG.webuiDir,
      stdio: 'inherit',
      shell: true,
    });
    child.on('close', (code) => {
      code === 0 ? resolve() : reject(new Error('前端构建失败'));
    });
  });
}
// Launch the backend via `go run` (dev mode; no prebuilt binary required).
// The child is tracked in `processes` and returned to the caller.
async function startBackendDev() {
  if (!checkGo()) {
    throw new Error('未找到 Go请先安装 Go (https://go.dev/dl/)');
  }
  log.info(`启动后端go run... http://localhost:${CONFIG.port}`);
  const env = {
    ...process.env,
    PORT: CONFIG.port,
    LOG_LEVEL: CONFIG.logLevel,
    DS2API_ADMIN_KEY: CONFIG.adminKey,
    GOPROXY: MIRRORS.goproxy,
  };
  const child = spawn('go', ['run', './cmd/ds2api'], {
    cwd: __dirname,
    stdio: 'inherit',
    shell: true,
    env,
  });
  processes.push(child);
  return child;
}
// Launch the compiled backend binary, building it first when absent.
// The child is tracked in `processes` and returned to the caller.
async function startBackendProd() {
  if (!binaryExists()) {
    log.warn('未找到编译产物,正在编译...');
    await buildBackend();
  }
  log.info(`启动后端(二进制)... http://localhost:${CONFIG.port}`);
  const env = {
    ...process.env,
    PORT: CONFIG.port,
    LOG_LEVEL: CONFIG.logLevel,
    DS2API_ADMIN_KEY: CONFIG.adminKey,
  };
  const child = spawn(BINARY, [], {
    cwd: __dirname,
    stdio: 'inherit',
    shell: false,
    env,
  });
  processes.push(child);
  return child;
}
// Launch the webui dev server; returns null when webui/ does not exist,
// otherwise the tracked child process.
async function startFrontend() {
  if (!existsSync(CONFIG.webuiDir)) {
    log.warn('webui 目录不存在,跳过前端启动');
    return null;
  }
  await ensureFrontendDeps();
  log.info(`启动前端开发服务器... http://localhost:${CONFIG.frontendPort}`);
  const child = spawn('npm', ['run', 'dev'], {
    cwd: CONFIG.webuiDir,
    stdio: 'inherit',
    shell: true,
  });
  processes.push(child);
  return child;
}
// Print the service URLs and how to stop everything.
function showStatus() {
  const rule = '─'.repeat(50);
  console.log('\n' + rule);
  log.success(`后端 API: http://localhost:${CONFIG.port}`);
  log.success(`管理界面: http://localhost:${CONFIG.port}/admin`);
  if (existsSync(CONFIG.webuiDir)) {
    log.success(`前端 Dev: http://localhost:${CONFIG.frontendPort}`);
  }
  console.log(rule);
  log.info('按 Ctrl+C 停止所有服务\n');
}
// Resolve once every tracked child process has exited.
// A child is "active" while both exitCode and signalCode are null
// (i.e. it has neither exited normally nor been killed by a signal).
// Fix: the original always waited for the first 1 s poll tick even when
// the process list was empty or every child had already exited; we now
// check immediately before starting the interval.
function waitForProcesses() {
  const isActive = (proc) => proc.exitCode === null && proc.signalCode === null;
  const anyActive = () => processes.some(isActive);
  return new Promise(resolve => {
    if (!anyActive()) {
      resolve();
      return;
    }
    const timer = setInterval(() => {
      if (!anyActive()) {
        clearInterval(timer);
        resolve();
      }
    }, 1000);
  });
}
// 交互式菜单
// 清屏后展示环境状态(Go/前端依赖/构建产物/二进制)、服务运行状态与关键环境变量,
// 然后读取用户输入并分发到对应操作;输入无效时递归重新显示菜单。
async function showMenu() {
  const rl = createInterface({ input: process.stdin, output: process.stdout });
  // 将回调式 rl.question 包装为 Promise,便于 await 一次性读取选项
  const question = (prompt) => new Promise(resolve => rl.question(prompt, resolve));
  console.clear();
  log.title('╔══════════════════════════════════════════╗');
  log.title('║ DS2API 启动脚本 (Go) ║');
  log.title('╚══════════════════════════════════════════╝');
  // 环境状态
  const goVersion = getGoVersion();
  const frontendDeps = checkFrontendDeps();
  const webuiBuilt = checkWebuiBuilt();
  const hasBinary = binaryExists();
  const running = getRunningStatus();
  // 布尔状态着色助手:真值为绿色、假值为黄色
  const ok = (v) => v ? `${colors.green}${colors.reset}` : `${colors.yellow}${colors.reset}`;
  console.log(`\n${colors.bright}环境状态:${colors.reset}`);
  console.log(`  Go: ${goVersion ? `${colors.green}${goVersion}${colors.reset}` : `${colors.red}未安装${colors.reset}`}`);
  // frontendDeps 为 null 表示不适用(无 webui 目录),区别于"未安装"
  console.log(`  前端依赖: ${frontendDeps === null ? `${colors.dim}N/A${colors.reset}` : frontendDeps ? `${colors.green}已安装${colors.reset}` : `${colors.yellow}未安装${colors.reset}`}`);
  console.log(`  前端构建: ${ok(webuiBuilt)} ${webuiBuilt ? `(${CONFIG.staticAdminDir})` : '未构建'}`);
  console.log(`  后端二进制: ${ok(hasBinary)} ${hasBinary ? BINARY : '未编译'}`);
  console.log(`\n${colors.bright}服务状态:${colors.reset}`);
  console.log(`  后端 (:${CONFIG.port}): ${running.backend.length > 0 ? `${colors.green}运行中${colors.reset} (PID: ${running.backend.join(', ')})` : `${colors.dim}未运行${colors.reset}`}`);
  console.log(`  前端 (:${CONFIG.frontendPort}): ${running.frontend.length > 0 ? `${colors.green}运行中${colors.reset} (PID: ${running.frontend.join(', ')})` : `${colors.dim}未运行${colors.reset}`}`);
  console.log(`\n${colors.bright}环境变量:${colors.reset}`);
  console.log(`  PORT: ${colors.cyan}${CONFIG.port}${colors.reset}`);
  console.log(`  LOG_LEVEL: ${colors.cyan}${CONFIG.logLevel}${colors.reset}`);
  console.log(`  DS2API_ADMIN_KEY: ${colors.cyan}${CONFIG.adminKey}${colors.reset}`);
  console.log(`  GOPROXY: ${colors.cyan}${MIRRORS.goproxy}${colors.reset}`);
  console.log(`  NPM_REGISTRY: ${colors.cyan}${MIRRORS.npm}${colors.reset}`);
  console.log(`${colors.dim} 自定义: DS2API_ADMIN_KEY=密钥 PORT=5001 node start.mjs${colors.reset}`);
  console.log(`
${colors.bright}请选择操作:${colors.reset}
${colors.cyan}1.${colors.reset} 开发模式 (go run + 前端热重载)
${colors.cyan}2.${colors.reset} 仅后端 (go run无需编译)
${colors.cyan}3.${colors.reset} 仅前端 (npm dev)
${colors.cyan}4.${colors.reset} 生产模式 (编译后运行,前端已嵌入)
${colors.cyan}5.${colors.reset} 编译后端 (go build)
${colors.cyan}6.${colors.reset} 构建前端 (npm build → static/admin)
${colors.cyan}7.${colors.reset} 安装前端依赖 (npm ci)
${colors.red}8.${colors.reset} 停止所有服务
${colors.cyan}0.${colors.reset} 退出
`);
  const choice = await question(`${colors.yellow}请输入选项 [1]: ${colors.reset}`);
  rl.close();
  // 空输入默认视为选项 1(开发模式)
  switch (choice.trim() || '1') {
    case '1':
      log.title('========== 开发模式 ==========');
      await startBackendDev();
      // 稍等后端先起来,再启动前端,避免日志交叉干扰
      await new Promise(r => setTimeout(r, 1500));
      await startFrontend();
      showStatus();
      await waitForProcesses();
      break;
    case '2':
      log.title('========== 仅后端 (go run) ==========');
      await startBackendDev();
      showStatus();
      await waitForProcesses();
      break;
    case '3':
      log.title('========== 仅前端 ==========');
      await startFrontend();
      showStatus();
      await waitForProcesses();
      break;
    case '4':
      log.title('========== 生产模式 ==========');
      await startBackendProd();
      showStatus();
      await waitForProcesses();
      break;
    case '5':
      log.title('========== 编译后端 ==========');
      await buildBackend();
      log.success(`编译完成:${BINARY}`);
      break;
    case '6':
      log.title('========== 构建前端 ==========');
      await buildWebui();
      log.success('前端构建完成!');
      break;
    case '7':
      log.title('========== 安装前端依赖 ==========');
      await installFrontendDeps();
      log.success('前端依赖安装完成!');
      break;
    case '8':
      await stopServices();
      break;
    case '0':
      log.info('再见!');
      process.exit(0);
      break;
    default:
      // 无效输入:提示后重新进入菜单
      log.warn('无效选项');
      await showMenu();
  }
}
// 命令行参数处理
// 读取第一个命令行参数并分发:dev/prod/build/webui/install/stop/status/help;
// 无参数或未知参数时进入交互式菜单 showMenu。
async function main() {
  const cmd = process.argv[2];
  // 这些子命令不依赖 Go(纯前端/进程管理/帮助),缺少 Go 也允许执行
  if (!checkGo() && !['install', 'webui', 'stop', 'status', 'help', '-h', '--help'].includes(cmd)) {
    log.error('未找到 Go请先安装 Go: https://go.dev/dl/');
    if (!cmd) {
      // 无 Go 时仍允许进入菜单(可以只操作前端)
    } else {
      process.exit(1);
    }
  }
  switch (cmd) {
    case 'dev':
      log.title('========== 开发模式 ==========');
      await startBackendDev();
      // 稍等后端先起来,再启动前端
      await new Promise(r => setTimeout(r, 1500));
      await startFrontend();
      showStatus();
      await waitForProcesses();
      break;
    case 'prod':
      log.title('========== 生产模式 ==========');
      await startBackendProd();
      showStatus();
      await waitForProcesses();
      break;
    case 'build':
      await buildBackend();
      log.success(`编译完成:${BINARY}`);
      break;
    case 'webui':
      await buildWebui();
      log.success('前端构建完成!');
      break;
    case 'install':
      await installFrontendDeps();
      log.success('前端依赖安装完成!');
      break;
    case 'stop':
      await stopServices();
      break;
    case 'status': {
      // 仅打印环境与服务状态,不启动任何进程
      const status = getRunningStatus();
      const goVer = getGoVersion();
      console.log(`\n${colors.bright}环境:${colors.reset}`);
      console.log(`  Go: ${goVer || `${colors.red}未安装${colors.reset}`}`);
      console.log(`\n${colors.bright}服务状态:${colors.reset}`);
      console.log(`  后端 (:${CONFIG.port}): ${status.backend.length > 0 ? `${colors.green}运行中${colors.reset} (PID: ${status.backend.join(', ')})` : `${colors.dim}未运行${colors.reset}`}`);
      console.log(`  前端 (:${CONFIG.frontendPort}): ${status.frontend.length > 0 ? `${colors.green}运行中${colors.reset} (PID: ${status.frontend.join(', ')})` : `${colors.dim}未运行${colors.reset}`}\n`);
      break;
    }
    case 'help':
    case '-h':
    case '--help':
      console.log(`
${colors.bright}DS2API 启动脚本 (Go)${colors.reset}
${colors.cyan}使用方法:${colors.reset}
node start.mjs 显示交互式菜单
node start.mjs dev 开发模式 (go run + 前端热重载)
node start.mjs prod 生产模式 (编译产物,前端已嵌入)
node start.mjs build 编译后端二进制 (go build)
node start.mjs webui 构建前端静态文件
node start.mjs install 安装前端依赖 (npm ci)
node start.mjs stop 停止所有服务
node start.mjs status 查看服务状态
${colors.cyan}常用环境变量:${colors.reset}
PORT 后端端口 (默认: 5001)
LOG_LEVEL 日志级别: DEBUG|INFO|WARN|ERROR (默认: INFO)
DS2API_ADMIN_KEY 管理员密钥 (默认: admin)
DS2API_CONFIG_PATH 配置文件路径 (默认: config.json)
GOPROXY Go 模块代理 (默认: https://goproxy.cn,direct)
NPM_REGISTRY npm 镜像源 (默认: https://registry.npmmirror.com)
${colors.cyan}示例:${colors.reset}
DS2API_ADMIN_KEY=mykey PORT=8080 node start.mjs dev
GOPROXY=off NPM_REGISTRY=https://registry.npmjs.org node start.mjs dev
`);
      break;
    default:
      // 未知/缺省命令:回退到交互式菜单
      await showMenu();
  }
}
// 脚本入口:执行 main;任何未捕获错误打印消息后以非零退出码结束
main().catch(e => {
  log.error(e.message);
  process.exit(1);
});

View File

@@ -0,0 +1,3 @@
{
"calls": []
}

View File

@@ -0,0 +1,10 @@
{
"calls": [
{
"name": "read_file",
"input": {
"path": "README.MD"
}
}
]
}

View File

@@ -0,0 +1,3 @@
{
"calls": []
}

View File

@@ -0,0 +1,3 @@
{
"calls": []
}

View File

@@ -0,0 +1,10 @@
{
"calls": [
{
"name": "read_file",
"input": {
"path": "README.MD"
}
}
]
}

View File

@@ -0,0 +1,4 @@
{
"text": "{\"tool_calls\":[{\"name\":\"unknown_tool\",\"input\":{\"x\":1}}]}",
"tool_names": []
}

View File

@@ -0,0 +1,4 @@
{
"text": "{\"tool_calls\":[{\"name\":\"Read_File\",\"input\":{\"path\":\"README.MD\"}}]}",
"tool_names": ["read_file"]
}

View File

@@ -0,0 +1,5 @@
{
"mode": "standalone",
"text": "```json\n{\"tool_calls\":[{\"name\":\"read_file\",\"input\":{\"path\":\"README.MD\"}}]}\n```",
"tool_names": ["read_file"]
}

View File

@@ -0,0 +1,5 @@
{
"mode": "standalone",
"text": "下面是示例:{\"tool_calls\":[{\"name\":\"read_file\",\"input\":{\"path\":\"README.MD\"}}]}请勿执行。",
"tool_names": ["read_file"]
}

View File

@@ -0,0 +1,5 @@
{
"mode": "standalone",
"text": "{\"tool_calls\":[{\"name\":\"read_file\",\"input\":{\"path\":\"README.MD\"}}]}",
"tool_names": ["read_file"]
}

View File

@@ -13,8 +13,10 @@ const {
const { const {
parseChunkForContent, parseChunkForContent,
resolveToolcallPolicy, resolveToolcallPolicy,
formatIncrementalToolCallDeltas,
normalizePreparedToolNames, normalizePreparedToolNames,
boolDefaultTrue, boolDefaultTrue,
filterIncrementalToolCallDeltasByAllowed,
} = handler.__test; } = handler.__test;
test('chat-stream exposes parser test hooks', () => { test('chat-stream exposes parser test hooks', () => {
@@ -56,6 +58,46 @@ test('boolDefaultTrue keeps false only when explicitly false', () => {
assert.equal(boolDefaultTrue(undefined), true); assert.equal(boolDefaultTrue(undefined), true);
}); });
test('filterIncrementalToolCallDeltasByAllowed blocks unknown name and follow-up args', () => {
const seen = new Map();
const filtered = filterIncrementalToolCallDeltasByAllowed(
[
{ index: 0, name: 'not_in_schema' },
{ index: 0, arguments: '{"x":1}' },
],
['read_file'],
seen,
);
assert.deepEqual(filtered, []);
assert.equal(seen.get(0), '__blocked__');
});
test('filterIncrementalToolCallDeltasByAllowed keeps allowed name and args', () => {
const seen = new Map();
const filtered = filterIncrementalToolCallDeltasByAllowed(
[
{ index: 0, name: 'read_file' },
{ index: 0, arguments: '{"path":"README.MD"}' },
],
['read_file'],
seen,
);
assert.deepEqual(filtered, [
{ index: 0, name: 'read_file' },
{ index: 0, arguments: '{"path":"README.MD"}' },
]);
});
test('incremental and final tool formatting share stable id via idStore', () => {
const idStore = new Map();
const incremental = formatIncrementalToolCallDeltas([{ index: 0, name: 'read_file' }], idStore);
const { formatOpenAIStreamToolCalls } = require('../../internal/js/helpers/stream-tool-sieve.js');
const finalCalls = formatOpenAIStreamToolCalls([{ name: 'read_file', input: { path: 'README.MD' } }], idStore);
assert.equal(incremental.length, 1);
assert.equal(finalCalls.length, 1);
assert.equal(incremental[0].id, finalCalls[0].id);
});
test('parseChunkForContent keeps split response/content fragments inside response array', () => { test('parseChunkForContent keeps split response/content fragments inside response array', () => {
const chunk = { const chunk = {
p: 'response', p: 'response',

View File

@@ -6,7 +6,7 @@ const fs = require('node:fs');
const path = require('node:path'); const path = require('node:path');
const chatStream = require('../../api/chat-stream.js'); const chatStream = require('../../api/chat-stream.js');
const { parseToolCalls } = require('../../internal/js/helpers/stream-tool-sieve.js'); const { parseToolCalls, parseStandaloneToolCalls } = require('../../internal/js/helpers/stream-tool-sieve.js');
const { parseChunkForContent, estimateTokens } = chatStream.__test; const { parseChunkForContent, estimateTokens } = chatStream.__test;
@@ -41,12 +41,14 @@ test('js compat: toolcall fixtures', () => {
for (const file of files) { for (const file of files) {
const name = file.replace(/\.json$/i, ''); const name = file.replace(/\.json$/i, '');
const fixture = readJSON(path.join(fixtureDir, file)); const fixture = readJSON(path.join(fixtureDir, file));
const expected = readJSON(path.join(expectedDir, `toolcalls_${name}.json`)); const expected = readJSON(path.join(expectedDir, `toolcalls_${name}.json`));
const got = parseToolCalls(fixture.text, fixture.tool_names || []); const mode = typeof fixture.mode === 'string' ? fixture.mode.trim().toLowerCase() : '';
assert.deepEqual(got, expected.calls, `${name}: calls mismatch`); const parser = mode === 'standalone' ? parseStandaloneToolCalls : parseToolCalls;
} const got = parser(fixture.text, fixture.tool_names || []);
}); assert.deepEqual(got, expected.calls, `${name}: calls mismatch`);
}
});
test('js compat: token fixtures', () => { test('js compat: token fixtures', () => {
const fixture = readJSON(path.join(compatRoot, 'fixtures', 'token_cases.json')); const fixture = readJSON(path.join(compatRoot, 'fixtures', 'token_cases.json'));

View File

@@ -9,7 +9,9 @@ const {
processToolSieveChunk, processToolSieveChunk,
flushToolSieve, flushToolSieve,
parseToolCalls, parseToolCalls,
parseToolCallsDetailed,
parseStandaloneToolCalls, parseStandaloneToolCalls,
formatOpenAIStreamToolCalls,
} = require('../../internal/js/helpers/stream-tool-sieve.js'); } = require('../../internal/js/helpers/stream-tool-sieve.js');
function runSieve(chunks, toolNames) { function runSieve(chunks, toolNames) {
@@ -60,13 +62,25 @@ test('parseToolCalls drops unknown schema names when toolNames is provided', ()
assert.equal(calls.length, 0); assert.equal(calls.length, 0);
}); });
test('parseToolCalls keeps unknown names when toolNames is empty', () => { test('parseToolCalls matches tool name case-insensitively and canonicalizes', () => {
const payload = JSON.stringify({
tool_calls: [{ name: 'Read_File', input: { path: 'README.MD' } }],
});
const calls = parseToolCalls(payload, ['read_file']);
assert.deepEqual(calls, [{ name: 'read_file', input: { path: 'README.MD' } }]);
});
test('parseToolCalls rejects all names when toolNames is empty (Go strict parity)', () => {
const payload = JSON.stringify({ const payload = JSON.stringify({
tool_calls: [{ name: 'not_in_schema', input: { q: 'go' } }], tool_calls: [{ name: 'not_in_schema', input: { q: 'go' } }],
}); });
const calls = parseToolCalls(payload, []); const calls = parseToolCalls(payload, []);
assert.equal(calls.length, 1); assert.equal(calls.length, 0);
assert.equal(calls[0].name, 'not_in_schema');
const detailed = parseToolCallsDetailed(payload, []);
assert.equal(detailed.sawToolCallSyntax, true);
assert.equal(detailed.rejectedByPolicy, true);
assert.deepEqual(detailed.rejectedToolNames, ['not_in_schema']);
}); });
test('parseToolCalls supports fenced json and function.arguments string payload', () => { test('parseToolCalls supports fenced json and function.arguments string payload', () => {
@@ -95,7 +109,7 @@ test('parseStandaloneToolCalls ignores fenced code block tool_call examples', ()
assert.equal(calls.length, 0); assert.equal(calls.length, 0);
}); });
test('sieve emits tool_calls and does not leak suspicious prefix on late key convergence', () => { test('sieve keeps late key convergence payload as plain text in strict mode', () => {
const events = runSieve( const events = runSieve(
[ [
'{"', '{"',
@@ -107,9 +121,9 @@ test('sieve emits tool_calls and does not leak suspicious prefix on late key con
const leakedText = collectText(events); const leakedText = collectText(events);
const hasToolCall = events.some((evt) => evt.type === 'tool_calls' && Array.isArray(evt.calls) && evt.calls.length > 0); const hasToolCall = events.some((evt) => evt.type === 'tool_calls' && Array.isArray(evt.calls) && evt.calls.length > 0);
const hasToolDelta = events.some((evt) => evt.type === 'tool_call_deltas' && Array.isArray(evt.deltas) && evt.deltas.length > 0); const hasToolDelta = events.some((evt) => evt.type === 'tool_call_deltas' && Array.isArray(evt.deltas) && evt.deltas.length > 0);
assert.equal(hasToolCall || hasToolDelta, true); assert.equal(hasToolCall || hasToolDelta, false);
assert.equal(leakedText.includes('{'), false); assert.equal(leakedText.includes('{'), true);
assert.equal(leakedText.toLowerCase().includes('tool_calls'), false); assert.equal(leakedText.toLowerCase().includes('tool_calls'), true);
assert.equal(leakedText.includes('后置正文C。'), true); assert.equal(leakedText.includes('后置正文C。'), true);
}); });
@@ -141,6 +155,20 @@ test('sieve flushes incomplete captured tool json as text on stream finalize', (
assert.equal(leakedText.includes('{'), true); assert.equal(leakedText.includes('{'), true);
}); });
test('sieve still intercepts large tool json payloads over previous capture limit', () => {
const large = 'a'.repeat(9000);
const payload = `{"tool_calls":[{"name":"read_file","input":{"path":"${large}"}}]}`;
const events = runSieve(
[payload.slice(0, 3000), payload.slice(3000, 7000), payload.slice(7000)],
['read_file'],
);
const leakedText = collectText(events);
const hasToolCall = events.some((evt) => evt.type === 'tool_calls' && evt.calls?.length > 0);
const hasToolDelta = events.some((evt) => evt.type === 'tool_call_deltas' && evt.deltas?.length > 0);
assert.equal(hasToolCall || hasToolDelta, true);
assert.equal(leakedText.toLowerCase().includes('tool_calls'), false);
});
test('sieve keeps plain text intact in tool mode when no tool call appears', () => { test('sieve keeps plain text intact in tool mode when no tool call appears', () => {
const events = runSieve( const events = runSieve(
['你好,', '这是普通文本回复。', '请继续。'], ['你好,', '这是普通文本回复。', '请继续。'],
@@ -166,7 +194,7 @@ test('sieve intercepts rejected unknown tool payload (no args) without raw leak'
assert.equal(leakedText.includes('后置正文G。'), true); assert.equal(leakedText.includes('后置正文G。'), true);
}); });
test('sieve emits incremental tool_call_deltas for split arguments payload', () => { test('sieve emits final tool_calls for split arguments payload without incremental deltas', () => {
const state = createToolSieveState(); const state = createToolSieveState();
const first = processToolSieveChunk( const first = processToolSieveChunk(
state, state,
@@ -181,37 +209,43 @@ test('sieve emits incremental tool_call_deltas for split arguments payload', ()
const tail = flushToolSieve(state, ['read_file']); const tail = flushToolSieve(state, ['read_file']);
const events = [...first, ...second, ...tail]; const events = [...first, ...second, ...tail];
const deltaEvents = events.filter((evt) => evt.type === 'tool_call_deltas'); const deltaEvents = events.filter((evt) => evt.type === 'tool_call_deltas');
assert.equal(deltaEvents.length > 0, true); assert.equal(deltaEvents.length, 0);
const merged = deltaEvents.flatMap((evt) => evt.deltas || []); const finalCalls = events.filter((evt) => evt.type === 'tool_calls').flatMap((evt) => evt.calls || []);
const hasName = merged.some((d) => d.name === 'read_file'); assert.equal(finalCalls.length, 1);
const argsJoined = merged assert.equal(finalCalls[0].name, 'read_file');
.map((d) => d.arguments || '') assert.deepEqual(finalCalls[0].input, { path: 'README.MD', mode: 'head' });
.join('');
assert.equal(hasName, true);
assert.equal(argsJoined.includes('"path":"README.MD"'), true);
assert.equal(argsJoined.includes('"mode":"head"'), true);
}); });
test('sieve still intercepts tool call after leading plain text without suffix', () => { test('sieve keeps tool json as text when leading prose exists (strict mode)', () => {
const events = runSieve( const events = runSieve(
['我将调用工具。', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}'], ['我将调用工具。', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}'],
['read_file'], ['read_file'],
); );
const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0)); const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0));
const leakedText = collectText(events); const leakedText = collectText(events);
assert.equal(hasTool, true); assert.equal(hasTool, false);
assert.equal(leakedText.includes('我将调用工具。'), true); assert.equal(leakedText.includes('我将调用工具。'), true);
assert.equal(leakedText.toLowerCase().includes('tool_calls'), false); assert.equal(leakedText.toLowerCase().includes('tool_calls'), true);
}); });
test('sieve intercepts tool call and preserves trailing same-chunk text', () => { test('sieve keeps same-chunk trailing prose payload as text in strict mode', () => {
const events = runSieve( const events = runSieve(
['{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}然后继续解释。'], ['{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}然后继续解释。'],
['read_file'], ['read_file'],
); );
const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0)); const hasTool = events.some((evt) => (evt.type === 'tool_calls' && evt.calls?.length > 0) || (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0));
const leakedText = collectText(events); const leakedText = collectText(events);
assert.equal(hasTool, true); assert.equal(hasTool, false);
assert.equal(leakedText.includes('然后继续解释。'), true); assert.equal(leakedText.includes('然后继续解释。'), true);
assert.equal(leakedText.toLowerCase().includes('tool_calls'), false); assert.equal(leakedText.toLowerCase().includes('tool_calls'), true);
});
test('formatOpenAIStreamToolCalls reuses ids with the same idStore', () => {
const idStore = new Map();
const calls = [{ name: 'read_file', input: { path: 'README.MD' } }];
const first = formatOpenAIStreamToolCalls(calls, idStore);
const second = formatOpenAIStreamToolCalls(calls, idStore);
assert.equal(first.length, 1);
assert.equal(second.length, 1);
assert.equal(first[0].id, second[0].id);
}); });

View File

@@ -24,9 +24,8 @@
<meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" /> <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
<meta name="apple-mobile-web-app-title" content="DS2API" /> <meta name="apple-mobile-web-app-title" content="DS2API" />
<!-- Favicon - using data URI for orange-yellow gradient icon --> <!-- Favicon -->
<link rel="icon" type="image/svg+xml" <link rel="icon" type="image/svg+xml" href="/ds2api-favicon.svg" />
href="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'%3E%3Cdefs%3E%3ClinearGradient id='g' x1='0%25' y1='0%25' x2='100%25' y2='100%25'%3E%3Cstop offset='0%25' stop-color='%23f59e0b'/%3E%3Cstop offset='100%25' stop-color='%23ef4444'/%3E%3C/linearGradient%3E%3C/defs%3E%3Crect rx='20' width='100' height='100' fill='url(%23g)'/%3E%3Ctext x='50' y='68' font-family='Arial,sans-serif' font-size='48' font-weight='bold' fill='white' text-anchor='middle'%3EDS%3C/text%3E%3C/svg%3E" />
<!-- Fonts --> <!-- Fonts -->
<link rel="preconnect" href="https://fonts.googleapis.com"> <link rel="preconnect" href="https://fonts.googleapis.com">

View File

@@ -0,0 +1,20 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100" role="img" aria-label="DS2API icon">
<defs>
<linearGradient id="g" x1="0%" y1="0%" x2="100%" y2="100%">
<stop offset="0%" stop-color="#f59e0b" />
<stop offset="100%" stop-color="#ef4444" />
</linearGradient>
</defs>
<rect width="100" height="100" rx="20" fill="url(#g)" />
<text
x="50"
y="68"
text-anchor="middle"
font-family="Arial,sans-serif"
font-size="48"
font-weight="700"
fill="#ffffff"
>
DS
</text>
</svg>

After

Width:  |  Height:  |  Size: 539 B

View File

@@ -1,117 +1,121 @@
import { useI18n } from '../../i18n' import { useI18n } from '../../i18n'
import { useAccountsData } from './useAccountsData' import { useAccountsData } from './useAccountsData'
import { useAccountActions } from './useAccountActions' import { useAccountActions } from './useAccountActions'
import QueueCards from './QueueCards' import QueueCards from './QueueCards'
import ApiKeysPanel from './ApiKeysPanel' import ApiKeysPanel from './ApiKeysPanel'
import AccountsTable from './AccountsTable' import AccountsTable from './AccountsTable'
import AddKeyModal from './AddKeyModal' import AddKeyModal from './AddKeyModal'
import AddAccountModal from './AddAccountModal' import AddAccountModal from './AddAccountModal'
export default function AccountManagerContainer({ config, onRefresh, onMessage, authFetch }) { export default function AccountManagerContainer({ config, onRefresh, onMessage, authFetch }) {
const { t } = useI18n() const { t } = useI18n()
const apiFetch = authFetch || fetch const apiFetch = authFetch || fetch
const { const {
queueStatus, queueStatus,
keysExpanded, keysExpanded,
setKeysExpanded, setKeysExpanded,
accounts, accounts,
page, page,
pageSize, pageSize,
totalPages, totalPages,
totalAccounts, totalAccounts,
loadingAccounts, loadingAccounts,
fetchAccounts, fetchAccounts,
changePageSize, changePageSize,
resolveAccountIdentifier, resolveAccountIdentifier,
} = useAccountsData({ apiFetch }) searchQuery,
handleSearchChange,
const { } = useAccountsData({ apiFetch })
showAddKey,
setShowAddKey, const {
showAddAccount, showAddKey,
setShowAddAccount, setShowAddKey,
newKey, showAddAccount,
setNewKey, setShowAddAccount,
copiedKey, newKey,
setCopiedKey, setNewKey,
newAccount, copiedKey,
setNewAccount, setCopiedKey,
loading, newAccount,
testing, setNewAccount,
testingAll, loading,
batchProgress, testing,
addKey, testingAll,
deleteKey, batchProgress,
addAccount, addKey,
deleteAccount, deleteKey,
testAccount, addAccount,
testAllAccounts, deleteAccount,
} = useAccountActions({ testAccount,
apiFetch, testAllAccounts,
t, } = useAccountActions({
onMessage, apiFetch,
onRefresh, t,
config, onMessage,
fetchAccounts, onRefresh,
resolveAccountIdentifier, config,
}) fetchAccounts,
resolveAccountIdentifier,
return ( })
<div className="space-y-6">
<QueueCards queueStatus={queueStatus} t={t} /> return (
<div className="space-y-6">
<ApiKeysPanel <QueueCards queueStatus={queueStatus} t={t} />
t={t}
config={config} <ApiKeysPanel
keysExpanded={keysExpanded} t={t}
setKeysExpanded={setKeysExpanded} config={config}
setShowAddKey={setShowAddKey} keysExpanded={keysExpanded}
copiedKey={copiedKey} setKeysExpanded={setKeysExpanded}
setCopiedKey={setCopiedKey} setShowAddKey={setShowAddKey}
onDeleteKey={deleteKey} copiedKey={copiedKey}
/> setCopiedKey={setCopiedKey}
onDeleteKey={deleteKey}
<AccountsTable />
t={t}
accounts={accounts} <AccountsTable
loadingAccounts={loadingAccounts} t={t}
testing={testing} accounts={accounts}
testingAll={testingAll} loadingAccounts={loadingAccounts}
batchProgress={batchProgress} testing={testing}
totalAccounts={totalAccounts} testingAll={testingAll}
page={page} batchProgress={batchProgress}
pageSize={pageSize} totalAccounts={totalAccounts}
totalPages={totalPages} page={page}
resolveAccountIdentifier={resolveAccountIdentifier} pageSize={pageSize}
onTestAll={testAllAccounts} totalPages={totalPages}
onShowAddAccount={() => setShowAddAccount(true)} resolveAccountIdentifier={resolveAccountIdentifier}
onTestAccount={testAccount} onTestAll={testAllAccounts}
onDeleteAccount={deleteAccount} onShowAddAccount={() => setShowAddAccount(true)}
onPrevPage={() => fetchAccounts(page - 1)} onTestAccount={testAccount}
onNextPage={() => fetchAccounts(page + 1)} onDeleteAccount={deleteAccount}
onPageSizeChange={changePageSize} onPrevPage={() => fetchAccounts(page - 1)}
/> onNextPage={() => fetchAccounts(page + 1)}
onPageSizeChange={changePageSize}
<AddKeyModal searchQuery={searchQuery}
show={showAddKey} onSearchChange={handleSearchChange}
t={t} />
newKey={newKey}
setNewKey={setNewKey} <AddKeyModal
loading={loading} show={showAddKey}
onClose={() => setShowAddKey(false)} t={t}
onAdd={addKey} newKey={newKey}
/> setNewKey={setNewKey}
loading={loading}
<AddAccountModal onClose={() => setShowAddKey(false)}
show={showAddAccount} onAdd={addKey}
t={t} />
newAccount={newAccount}
setNewAccount={setNewAccount} <AddAccountModal
loading={loading} show={showAddAccount}
onClose={() => setShowAddAccount(false)} t={t}
onAdd={addAccount} newAccount={newAccount}
/> setNewAccount={setNewAccount}
</div> loading={loading}
) onClose={() => setShowAddAccount(false)}
} onAdd={addAccount}
/>
</div>
)
}

View File

@@ -1,182 +1,191 @@
import { useState } from 'react' import { useState } from 'react'
import { ChevronLeft, ChevronRight, Check, Copy, Play, Plus, Trash2 } from 'lucide-react' import { ChevronLeft, ChevronRight, Check, Copy, Play, Plus, Trash2 } from 'lucide-react'
import clsx from 'clsx' import clsx from 'clsx'
export default function AccountsTable({ export default function AccountsTable({
t, t,
accounts, accounts,
loadingAccounts, loadingAccounts,
testing, testing,
testingAll, testingAll,
batchProgress, batchProgress,
totalAccounts, totalAccounts,
page, page,
pageSize, pageSize,
totalPages, totalPages,
resolveAccountIdentifier, resolveAccountIdentifier,
onTestAll, onTestAll,
onShowAddAccount, onShowAddAccount,
onTestAccount, onTestAccount,
onDeleteAccount, onDeleteAccount,
onPrevPage, onPrevPage,
onNextPage, onNextPage,
onPageSizeChange, onPageSizeChange,
}) { searchQuery,
const [copiedId, setCopiedId] = useState(null) onSearchChange,
}) {
const copyId = (id) => { const [copiedId, setCopiedId] = useState(null)
navigator.clipboard.writeText(id).then(() => {
setCopiedId(id) const copyId = (id) => {
setTimeout(() => setCopiedId(null), 1500) navigator.clipboard.writeText(id).then(() => {
}) setCopiedId(id)
} setTimeout(() => setCopiedId(null), 1500)
return ( })
<div className="bg-card border border-border rounded-xl overflow-hidden shadow-sm"> }
<div className="p-6 border-b border-border flex flex-col md:flex-row md:items-center justify-between gap-4"> return (
<div> <div className="bg-card border border-border rounded-xl overflow-hidden shadow-sm">
<h2 className="text-lg font-semibold">{t('accountManager.accountsTitle')}</h2> <div className="p-6 border-b border-border flex flex-col md:flex-row md:items-center justify-between gap-4">
<p className="text-sm text-muted-foreground">{t('accountManager.accountsDesc')}</p> <div>
</div> <h2 className="text-lg font-semibold">{t('accountManager.accountsTitle')}</h2>
<div className="flex flex-wrap gap-2"> <p className="text-sm text-muted-foreground">{t('accountManager.accountsDesc')}</p>
<button </div>
onClick={onTestAll} <div className="flex flex-wrap gap-2">
disabled={testingAll || totalAccounts === 0} <input
className="flex items-center px-3 py-2 bg-secondary text-secondary-foreground rounded-lg hover:bg-secondary/80 transition-colors text-xs font-medium border border-border disabled:opacity-50" type="text"
> value={searchQuery}
{testingAll ? <span className="animate-spin mr-2"></span> : <Play className="w-3 h-3 mr-2" />} onChange={e => onSearchChange(e.target.value)}
{t('accountManager.testAll')} placeholder={t('accountManager.searchPlaceholder')}
</button> className="px-3 py-1.5 text-sm bg-muted border border-border rounded-lg focus:outline-none focus:ring-1 focus:ring-ring placeholder:text-muted-foreground"
<button />
onClick={onShowAddAccount} <button
className="flex items-center gap-2 px-4 py-2 bg-primary text-primary-foreground rounded-lg hover:bg-primary/90 transition-colors font-medium text-sm shadow-sm" onClick={onTestAll}
> disabled={testingAll || totalAccounts === 0}
<Plus className="w-4 h-4" /> className="flex items-center px-3 py-2 bg-secondary text-secondary-foreground rounded-lg hover:bg-secondary/80 transition-colors text-xs font-medium border border-border disabled:opacity-50"
{t('accountManager.addAccount')} >
</button> {testingAll ? <span className="animate-spin mr-2"></span> : <Play className="w-3 h-3 mr-2" />}
</div> {t('accountManager.testAll')}
</div> </button>
<button
{testingAll && batchProgress.total > 0 && ( onClick={onShowAddAccount}
<div className="p-4 border-b border-border bg-muted/30"> className="flex items-center gap-2 px-4 py-2 bg-primary text-primary-foreground rounded-lg hover:bg-primary/90 transition-colors font-medium text-sm shadow-sm"
<div className="flex items-center justify-between text-sm mb-2"> >
<span className="font-medium">{t('accountManager.testingAllAccounts')}</span> <Plus className="w-4 h-4" />
<span className="text-muted-foreground">{batchProgress.current} / {batchProgress.total}</span> {t('accountManager.addAccount')}
</div> </button>
<div className="w-full bg-muted rounded-full h-2 overflow-hidden mb-4"> </div>
<div </div>
className="bg-primary h-full transition-all duration-300"
style={{ width: `${(batchProgress.current / batchProgress.total) * 100}%` }} {testingAll && batchProgress.total > 0 && (
/> <div className="p-4 border-b border-border bg-muted/30">
</div> <div className="flex items-center justify-between text-sm mb-2">
{batchProgress.results.length > 0 && ( <span className="font-medium">{t('accountManager.testingAllAccounts')}</span>
<div className="grid grid-cols-2 md:grid-cols-4 gap-2 max-h-32 overflow-y-auto custom-scrollbar"> <span className="text-muted-foreground">{batchProgress.current} / {batchProgress.total}</span>
{batchProgress.results.map((r, i) => ( </div>
<div key={i} className={clsx( <div className="w-full bg-muted rounded-full h-2 overflow-hidden mb-4">
"text-xs px-2 py-1 rounded border truncate", <div
r.success ? "bg-emerald-500/10 border-emerald-500/20 text-emerald-500" : "bg-destructive/10 border-destructive/20 text-destructive" className="bg-primary h-full transition-all duration-300"
)}> style={{ width: `${(batchProgress.current / batchProgress.total) * 100}%` }}
{r.success ? '✓' : '✗'} {r.id} />
</div> </div>
))} {batchProgress.results.length > 0 && (
</div> <div className="grid grid-cols-2 md:grid-cols-4 gap-2 max-h-32 overflow-y-auto custom-scrollbar">
)} {batchProgress.results.map((r, i) => (
</div> <div key={i} className={clsx(
)} "text-xs px-2 py-1 rounded border truncate",
r.success ? "bg-emerald-500/10 border-emerald-500/20 text-emerald-500" : "bg-destructive/10 border-destructive/20 text-destructive"
<div className="divide-y divide-border"> )}>
{loadingAccounts ? ( {r.success ? '✓' : '✗'} {r.id}
<div className="p-8 text-center text-muted-foreground">{t('actions.loading')}</div> </div>
) : accounts.length > 0 ? ( ))}
accounts.map((acc, i) => { </div>
const id = resolveAccountIdentifier(acc) )}
return ( </div>
<div key={i} className="p-4 flex flex-col md:flex-row md:items-center justify-between gap-4 hover:bg-muted/50 transition-colors"> )}
<div className="flex items-center gap-3 min-w-0">
<div className={clsx( <div className="divide-y divide-border">
"w-2 h-2 rounded-full shrink-0", {loadingAccounts ? (
acc.test_status === 'failed' ? "bg-red-500 shadow-[0_0_8px_rgba(239,68,68,0.5)]" : <div className="p-8 text-center text-muted-foreground">{t('actions.loading')}</div>
(acc.test_status === 'ok' || acc.has_token) ? "bg-emerald-500 shadow-[0_0_8px_rgba(16,185,129,0.5)]" : ) : accounts.length > 0 ? (
"bg-amber-500" accounts.map((acc, i) => {
)} /> const id = resolveAccountIdentifier(acc)
<div className="min-w-0"> return (
<div <div key={i} className="p-4 flex flex-col md:flex-row md:items-center justify-between gap-4 hover:bg-muted/50 transition-colors">
className="font-medium truncate flex items-center gap-1.5 cursor-pointer hover:text-primary transition-colors group" <div className="flex items-center gap-3 min-w-0">
onClick={() => copyId(id)} <div className={clsx(
> "w-2 h-2 rounded-full shrink-0",
<span className="truncate">{id || '-'}</span> acc.test_status === 'failed' ? "bg-red-500 shadow-[0_0_8px_rgba(239,68,68,0.5)]" :
{copiedId === id (acc.test_status === 'ok' || acc.has_token) ? "bg-emerald-500 shadow-[0_0_8px_rgba(16,185,129,0.5)]" :
? <Check className="w-3 h-3 text-emerald-500 shrink-0" /> "bg-amber-500"
: <Copy className="w-3 h-3 opacity-0 group-hover:opacity-50 shrink-0 transition-opacity" /> )} />
} <div className="min-w-0">
</div> <div
<div className="flex items-center gap-2 text-xs text-muted-foreground mt-0.5"> className="font-medium truncate flex items-center gap-1.5 cursor-pointer hover:text-primary transition-colors group"
<span>{acc.test_status === 'failed' ? t('accountManager.testStatusFailed') : (acc.test_status === 'ok' || acc.has_token) ? t('accountManager.sessionActive') : t('accountManager.reauthRequired')}</span> onClick={() => copyId(id)}
{acc.token_preview && ( >
<span className="font-mono bg-muted px-1.5 py-0.5 rounded text-[10px]"> <span className="truncate">{id || '-'}</span>
{acc.token_preview} {copiedId === id
</span> ? <Check className="w-3 h-3 text-emerald-500 shrink-0" />
)} : <Copy className="w-3 h-3 opacity-0 group-hover:opacity-50 shrink-0 transition-opacity" />
</div> }
</div> </div>
</div> <div className="flex items-center gap-2 text-xs text-muted-foreground mt-0.5">
<div className="flex items-center gap-2 self-start lg:self-auto ml-5 lg:ml-0"> <span>{acc.test_status === 'failed' ? t('accountManager.testStatusFailed') : (acc.test_status === 'ok' || acc.has_token) ? t('accountManager.sessionActive') : t('accountManager.reauthRequired')}</span>
<button {acc.token_preview && (
onClick={() => onTestAccount(id)} <span className="font-mono bg-muted px-1.5 py-0.5 rounded text-[10px]">
disabled={testing[id]} {acc.token_preview}
className="px-2 lg:px-3 py-1 lg:py-1.5 text-[10px] lg:text-xs font-medium border border-border rounded-md hover:bg-secondary transition-colors disabled:opacity-50" </span>
> )}
{testing[id] ? t('actions.testing') : t('actions.test')} </div>
</button> </div>
<button </div>
onClick={() => onDeleteAccount(id)} <div className="flex items-center gap-2 self-start lg:self-auto ml-5 lg:ml-0">
className="p-1 lg:p-1.5 text-muted-foreground hover:text-destructive hover:bg-destructive/10 rounded-md transition-colors" <button
> onClick={() => onTestAccount(id)}
<Trash2 className="w-3.5 h-3.5 lg:w-4 lg:h-4" /> disabled={testing[id]}
</button> className="px-2 lg:px-3 py-1 lg:py-1.5 text-[10px] lg:text-xs font-medium border border-border rounded-md hover:bg-secondary transition-colors disabled:opacity-50"
</div> >
</div> {testing[id] ? t('actions.testing') : t('actions.test')}
) </button>
}) <button
) : ( onClick={() => onDeleteAccount(id)}
<div className="p-8 text-center text-muted-foreground">{t('accountManager.noAccounts')}</div> className="p-1 lg:p-1.5 text-muted-foreground hover:text-destructive hover:bg-destructive/10 rounded-md transition-colors"
)} >
</div> <Trash2 className="w-3.5 h-3.5 lg:w-4 lg:h-4" />
</button>
{totalPages > 1 && ( </div>
<div className="p-4 border-t border-border flex items-center justify-between"> </div>
<div className="flex items-center gap-3"> )
<div className="text-sm text-muted-foreground"> })
{t('accountManager.pageInfo', { current: page, total: totalPages, count: totalAccounts })} ) : (
</div> <div className="p-8 text-center text-muted-foreground">{searchQuery ? t('accountManager.searchNoResults') : t('accountManager.noAccounts')}</div>
<select )}
value={pageSize} </div>
onChange={e => onPageSizeChange(Number(e.target.value))}
className="text-sm border border-border rounded-md px-2 py-1 bg-background text-foreground" {totalPages > 1 && (
> <div className="p-4 border-t border-border flex items-center justify-between">
{[10, 20, 50, 100, 500, 1000, 2000, 5000].map(s => ( <div className="flex items-center gap-3">
<option key={s} value={s}>{s}</option> <div className="text-sm text-muted-foreground">
))} {t('accountManager.pageInfo', { current: page, total: totalPages, count: totalAccounts })}
</select> </div>
</div> <select
<div className="flex items-center gap-2"> value={pageSize}
<button onChange={e => onPageSizeChange(Number(e.target.value))}
onClick={onPrevPage} className="text-sm border border-border rounded-md px-2 py-1 bg-background text-foreground"
disabled={page <= 1 || loadingAccounts} >
className="p-2 border border-border rounded-md hover:bg-secondary transition-colors disabled:opacity-50 disabled:cursor-not-allowed" {[10, 20, 50, 100, 500, 1000, 2000, 5000].map(s => (
> <option key={s} value={s}>{s}</option>
<ChevronLeft className="w-4 h-4" /> ))}
</button> </select>
<span className="text-sm font-medium px-2">{page} / {totalPages}</span> </div>
<button <div className="flex items-center gap-2">
onClick={onNextPage} <button
disabled={page >= totalPages || loadingAccounts} onClick={onPrevPage}
className="p-2 border border-border rounded-md hover:bg-secondary transition-colors disabled:opacity-50 disabled:cursor-not-allowed" disabled={page <= 1 || loadingAccounts}
> className="p-2 border border-border rounded-md hover:bg-secondary transition-colors disabled:opacity-50 disabled:cursor-not-allowed"
<ChevronRight className="w-4 h-4" /> >
</button> <ChevronLeft className="w-4 h-4" />
</div> </button>
</div> <span className="text-sm font-medium px-2">{page} / {totalPages}</span>
)} <button
</div> onClick={onNextPage}
) disabled={page >= totalPages || loadingAccounts}
} className="p-2 border border-border rounded-md hover:bg-secondary transition-colors disabled:opacity-50 disabled:cursor-not-allowed"
>
<ChevronRight className="w-4 h-4" />
</button>
</div>
</div>
)}
</div>
)
}

View File

@@ -1,75 +1,86 @@
import { useEffect, useState } from 'react' import { useEffect, useState } from 'react'
export function useAccountsData({ apiFetch }) { export function useAccountsData({ apiFetch }) {
const [queueStatus, setQueueStatus] = useState(null) const [queueStatus, setQueueStatus] = useState(null)
const [keysExpanded, setKeysExpanded] = useState(false) const [keysExpanded, setKeysExpanded] = useState(false)
const [accounts, setAccounts] = useState([]) const [accounts, setAccounts] = useState([])
const [page, setPage] = useState(1) const [page, setPage] = useState(1)
const [pageSize, setPageSize] = useState(10) const [pageSize, setPageSize] = useState(10)
const [totalPages, setTotalPages] = useState(1) const [totalPages, setTotalPages] = useState(1)
const [totalAccounts, setTotalAccounts] = useState(0) const [totalAccounts, setTotalAccounts] = useState(0)
const [loadingAccounts, setLoadingAccounts] = useState(false) const [loadingAccounts, setLoadingAccounts] = useState(false)
const resolveAccountIdentifier = (acc) => { const resolveAccountIdentifier = (acc) => {
if (!acc || typeof acc !== 'object') return '' if (!acc || typeof acc !== 'object') return ''
return String(acc.identifier || acc.email || acc.mobile || '').trim() return String(acc.identifier || acc.email || acc.mobile || '').trim()
} }
const fetchAccounts = async (targetPage = page, targetPageSize = pageSize) => { const [searchQuery, setSearchQuery] = useState('')
setLoadingAccounts(true)
try { const fetchAccounts = async (targetPage = page, targetPageSize = pageSize, targetQuery = searchQuery) => {
const res = await apiFetch(`/admin/accounts?page=${targetPage}&page_size=${targetPageSize}`) setLoadingAccounts(true)
if (res.ok) { try {
const data = await res.json() let url = `/admin/accounts?page=${targetPage}&page_size=${targetPageSize}`
setAccounts(data.items || []) if (targetQuery.trim()) url += `&q=${encodeURIComponent(targetQuery.trim())}`
setTotalPages(data.total_pages || 1) const res = await apiFetch(url)
setTotalAccounts(data.total || 0) if (res.ok) {
setPage(data.page || 1) const data = await res.json()
} setAccounts(data.items || [])
} catch (e) { setTotalPages(data.total_pages || 1)
console.error('Failed to fetch accounts:', e) setTotalAccounts(data.total || 0)
} finally { setPage(data.page || 1)
setLoadingAccounts(false) }
} } catch (e) {
} console.error('Failed to fetch accounts:', e)
} finally {
const changePageSize = (newSize) => { setLoadingAccounts(false)
setPageSize(newSize) }
fetchAccounts(1, newSize) }
}
const changePageSize = (newSize) => {
const fetchQueueStatus = async () => { setPageSize(newSize)
try { fetchAccounts(1, newSize)
const res = await apiFetch('/admin/queue/status') }
if (res.ok) {
const data = await res.json() const handleSearchChange = (query) => {
setQueueStatus(data) setSearchQuery(query)
} fetchAccounts(1, pageSize, query)
} catch (e) { }
console.error('Failed to fetch queue status:', e)
} const fetchQueueStatus = async () => {
} try {
const res = await apiFetch('/admin/queue/status')
useEffect(() => { if (res.ok) {
fetchAccounts() const data = await res.json()
fetchQueueStatus() setQueueStatus(data)
const interval = setInterval(fetchQueueStatus, 5000) }
return () => clearInterval(interval) } catch (e) {
}, []) console.error('Failed to fetch queue status:', e)
}
return { }
queueStatus,
keysExpanded, useEffect(() => {
setKeysExpanded, fetchAccounts()
accounts, fetchQueueStatus()
page, const interval = setInterval(fetchQueueStatus, 5000)
pageSize, return () => clearInterval(interval)
totalPages, }, [])
totalAccounts,
loadingAccounts, return {
fetchAccounts, queueStatus,
changePageSize, keysExpanded,
resolveAccountIdentifier, setKeysExpanded,
} accounts,
} page,
pageSize,
totalPages,
totalAccounts,
loadingAccounts,
fetchAccounts,
changePageSize,
resolveAccountIdentifier,
searchQuery,
handleSearchChange,
}
}

View File

@@ -1,295 +1,297 @@
{ {
"language": { "language": {
"label": "Language", "label": "Language",
"english": "English", "english": "English",
"chinese": "中文" "chinese": "中文"
}, },
"nav": { "nav": {
"accounts": { "accounts": {
"label": "Account Management", "label": "Account Management",
"desc": "Manage the DeepSeek account pool" "desc": "Manage the DeepSeek account pool"
}, },
"test": { "test": {
"label": "API Test", "label": "API Test",
"desc": "Test API connectivity and responses" "desc": "Test API connectivity and responses"
}, },
"import": { "import": {
"label": "Batch Import", "label": "Batch Import",
"desc": "Bulk import account configuration" "desc": "Bulk import account configuration"
}, },
"vercel": { "vercel": {
"label": "Vercel Sync", "label": "Vercel Sync",
"desc": "Sync configuration to Vercel" "desc": "Sync configuration to Vercel"
}, },
"settings": { "settings": {
"label": "Settings", "label": "Settings",
"desc": "Edit runtime and security settings online" "desc": "Edit runtime and security settings online"
} }
}, },
"sidebar": { "sidebar": {
"onlineAdminConsole": "Online Admin Console", "onlineAdminConsole": "Online Admin Console",
"systemStatus": "System Status", "systemStatus": "System Status",
"statusOnline": "Online", "statusOnline": "Online",
"accounts": "Accounts", "accounts": "Accounts",
"keys": "Keys", "keys": "Keys",
"signOut": "Sign out" "signOut": "Sign out"
}, },
"auth": { "auth": {
"expired": "Authentication expired. Please sign in again.", "expired": "Authentication expired. Please sign in again.",
"checking": "Checking authentication status..." "checking": "Checking authentication status..."
}, },
"errors": { "errors": {
"fetchConfig": "Failed to fetch configuration: {error}" "fetchConfig": "Failed to fetch configuration: {error}"
}, },
"actions": { "actions": {
"cancel": "Cancel", "cancel": "Cancel",
"add": "Add", "add": "Add",
"delete": "Delete", "delete": "Delete",
"copy": "Copy", "copy": "Copy",
"generate": "Generate", "generate": "Generate",
"test": "Test", "test": "Test",
"testing": "Testing...", "testing": "Testing...",
"loading": "Loading..." "loading": "Loading..."
}, },
"messages": { "messages": {
"deleted": "Deleted successfully", "deleted": "Deleted successfully",
"deleteFailed": "Delete failed", "deleteFailed": "Delete failed",
"failedToAdd": "Failed to add", "failedToAdd": "Failed to add",
"networkError": "Network error.", "networkError": "Network error.",
"requestFailed": "Request failed.", "requestFailed": "Request failed.",
"generationStopped": "Generation stopped.", "generationStopped": "Generation stopped.",
"invalidJson": "Invalid JSON format.", "invalidJson": "Invalid JSON format.",
"importFailed": "Import failed.", "importFailed": "Import failed.",
"copyFailed": "Copy failed." "copyFailed": "Copy failed."
}, },
"landing": { "landing": {
"adminConsole": "Admin Console", "adminConsole": "Admin Console",
"apiStatus": "API Status", "apiStatus": "API Status",
"features": { "features": {
"compatibility": { "compatibility": {
"title": "Full Compatibility", "title": "Full Compatibility",
"desc": "OpenAI & Claude format support" "desc": "OpenAI & Claude format support"
}, },
"loadBalancing": { "loadBalancing": {
"title": "Load Balancing", "title": "Load Balancing",
"desc": "Smart rotation with stable throughput" "desc": "Smart rotation with stable throughput"
}, },
"reasoning": { "reasoning": {
"title": "Deep Reasoning", "title": "Deep Reasoning",
"desc": "Expose reasoning traces when enabled" "desc": "Expose reasoning traces when enabled"
}, },
"search": { "search": {
"title": "Web Search", "title": "Web Search",
"desc": "Integrated native web search" "desc": "Integrated native web search"
} }
} }
}, },
"accountManager": { "accountManager": {
"addKeySuccess": "API key added successfully.", "addKeySuccess": "API key added successfully.",
"addAccountSuccess": "Account added successfully.", "addAccountSuccess": "Account added successfully.",
"requiredFields": "Password and email/mobile are required.", "requiredFields": "Password and email/mobile are required.",
"deleteKeyConfirm": "Are you sure you want to delete this API key?", "deleteKeyConfirm": "Are you sure you want to delete this API key?",
"deleteAccountConfirm": "Are you sure you want to delete this account?", "deleteAccountConfirm": "Are you sure you want to delete this account?",
"invalidIdentifier": "Invalid account identifier. Operation aborted.", "invalidIdentifier": "Invalid account identifier. Operation aborted.",
"testAllConfirm": "Test API connectivity for all accounts?", "testAllConfirm": "Test API connectivity for all accounts?",
"testAllCompleted": "Completed: {success}/{total} available", "testAllCompleted": "Completed: {success}/{total} available",
"testFailed": "Test failed: {error}", "testFailed": "Test failed: {error}",
"available": "Available", "available": "Available",
"inUse": "In use", "inUse": "In use",
"totalPool": "Total pool", "totalPool": "Total pool",
"accountsUnit": "accounts", "accountsUnit": "accounts",
"threadsUnit": "threads", "threadsUnit": "threads",
"apiKeysTitle": "API Keys", "apiKeysTitle": "API Keys",
"apiKeysDesc": "Manage the API access key pool", "apiKeysDesc": "Manage the API access key pool",
"addKey": "Add key", "addKey": "Add key",
"copied": "Copied", "copied": "Copied",
"copyKeyTitle": "Copy key", "copyKeyTitle": "Copy key",
"deleteKeyTitle": "Delete key", "deleteKeyTitle": "Delete key",
"noApiKeys": "No API keys found.", "noApiKeys": "No API keys found.",
"accountsTitle": "DeepSeek Accounts", "accountsTitle": "DeepSeek Accounts",
"accountsDesc": "Manage the DeepSeek account pool", "accountsDesc": "Manage the DeepSeek account pool",
"testAll": "Test all", "testAll": "Test all",
"addAccount": "Add account", "addAccount": "Add account",
"testingAllAccounts": "Testing all accounts...", "testingAllAccounts": "Testing all accounts...",
"sessionActive": "Session active", "sessionActive": "Session active",
"reauthRequired": "Re-auth required", "reauthRequired": "Re-auth required",
"testStatusFailed": "Last test failed", "testStatusFailed": "Last test failed",
"noAccounts": "No accounts found.", "noAccounts": "No accounts found.",
"modalAddKeyTitle": "Add API key", "modalAddKeyTitle": "Add API key",
"newKeyLabel": "New key value", "newKeyLabel": "New key value",
"newKeyPlaceholder": "Enter a custom API key", "newKeyPlaceholder": "Enter a custom API key",
"generate": "Generate", "generate": "Generate",
"generateHint": "Click Generate to create a random key.", "generateHint": "Click Generate to create a random key.",
"addKeyLoading": "Adding...", "addKeyLoading": "Adding...",
"addKeyAction": "Add key", "addKeyAction": "Add key",
"modalAddAccountTitle": "Add DeepSeek account", "modalAddAccountTitle": "Add DeepSeek account",
"emailOptional": "Email (optional)", "emailOptional": "Email (optional)",
"mobileOptional": "Mobile (optional)", "mobileOptional": "Mobile (optional)",
"passwordLabel": "Password", "passwordLabel": "Password",
"passwordPlaceholder": "Account password", "passwordPlaceholder": "Account password",
"addAccountLoading": "Adding...", "addAccountLoading": "Adding...",
"addAccountAction": "Add account", "addAccountAction": "Add account",
"pageInfo": "Page {current}/{total}, {count} accounts total" "pageInfo": "Page {current}/{total}, {count} accounts total",
}, "searchPlaceholder": "Search accounts...",
"apiTester": { "searchNoResults": "No accounts match your search"
"defaultMessage": "Hello, please introduce yourself in one sentence.", },
"models": { "apiTester": {
"chat": "Non-reasoning model", "defaultMessage": "Hello, please introduce yourself in one sentence.",
"reasoner": "Reasoning model", "models": {
"chatSearch": "Non-reasoning model (with search)", "chat": "Non-reasoning model",
"reasonerSearch": "Reasoning model (with search)" "reasoner": "Reasoning model",
}, "chatSearch": "Non-reasoning model (with search)",
"missingApiKey": "Please provide an API key.", "reasonerSearch": "Reasoning model (with search)"
"requestFailed": "Request failed.", },
"networkError": "Network error: {error}", "missingApiKey": "Please provide an API key.",
"testSuccess": "{account}: Test successful ({time}ms)", "requestFailed": "Request failed.",
"config": "Configuration", "networkError": "Network error: {error}",
"modelLabel": "Model", "testSuccess": "{account}: Test successful ({time}ms)",
"streamMode": "Streaming", "config": "Configuration",
"accountSelector": "Account", "modelLabel": "Model",
"autoRandom": "🤖 Auto / Random", "streamMode": "Streaming",
"apiKeyOptional": "API Key (optional)", "accountSelector": "Account",
"apiKeyDefault": "Default: ...{suffix}", "autoRandom": "🤖 Auto / Random",
"apiKeyPlaceholder": "Enter a custom key", "apiKeyOptional": "API Key (optional)",
"modeManaged": "Managed key mode (uses account pool).", "apiKeyDefault": "Default: ...{suffix}",
"modeDirect": "Direct token mode (requires a valid DeepSeek token).", "apiKeyPlaceholder": "Enter a custom key",
"statusError": "Error", "modeManaged": "Managed key mode (uses account pool).",
"reasoningTrace": "Reasoning Trace", "modeDirect": "Direct token mode (requires a valid DeepSeek token).",
"generating": "Generating response...", "statusError": "Error",
"enterMessage": "Enter a message...", "reasoningTrace": "Reasoning Trace",
"adminConsoleLabel": "DeepSeek admin console" "generating": "Generating response...",
}, "enterMessage": "Enter a message...",
"batchImport": { "adminConsoleLabel": "DeepSeek admin console"
"templates": { },
"full": { "batchImport": {
"name": "Full configuration template", "templates": {
"desc": "Includes keys, accounts, and model mapping" "full": {
}, "name": "Full configuration template",
"emailOnly": { "desc": "Includes keys, accounts, and model mapping"
"name": "Email-only accounts", },
"desc": "Batch import accounts using email login" "emailOnly": {
}, "name": "Email-only accounts",
"mobileOnly": { "desc": "Batch import accounts using email login"
"name": "Mobile-only accounts", },
"desc": "Batch import accounts using mobile login" "mobileOnly": {
}, "name": "Mobile-only accounts",
"keysOnly": { "desc": "Batch import accounts using mobile login"
"name": "API keys only", },
"desc": "Add API access keys only" "keysOnly": {
} "name": "API keys only",
}, "desc": "Add API access keys only"
"enterJson": "Please provide JSON configuration content.", }
"importSuccess": "Import successful: {keys} keys, {accounts} accounts", },
"templateLoaded": "Template loaded: {name}", "enterJson": "Please provide JSON configuration content.",
"currentConfigLoaded": "Current configuration loaded.", "importSuccess": "Import successful: {keys} keys, {accounts} accounts",
"fetchConfigFailed": "Failed to fetch configuration.", "templateLoaded": "Template loaded: {name}",
"copySuccess": "Base64 configuration copied to clipboard.", "currentConfigLoaded": "Current configuration loaded.",
"quickTemplates": "Quick Templates", "fetchConfigFailed": "Failed to fetch configuration.",
"dataExport": "Data Export", "copySuccess": "Base64 configuration copied to clipboard.",
"dataExportDesc": "Copy the Base64-encoded configuration for Vercel environment variables.", "quickTemplates": "Quick Templates",
"copyBase64": "Copy Base64 config", "dataExport": "Data Export",
"copied": "Copied", "dataExportDesc": "Copy the Base64-encoded configuration for Vercel environment variables.",
"variableName": "Variable name", "copyBase64": "Copy Base64 config",
"jsonEditor": "JSON Editor", "copied": "Copied",
"loadCurrentConfig": "Load current config", "variableName": "Variable name",
"applyConfig": "Apply config", "jsonEditor": "JSON Editor",
"importing": "Importing...", "loadCurrentConfig": "Load current config",
"importComplete": "Import complete", "applyConfig": "Apply config",
"importSummary": "Imported {keys} API keys and updated {accounts} accounts." "importing": "Importing...",
}, "importComplete": "Import complete",
"settings": { "importSummary": "Imported {keys} API keys and updated {accounts} accounts."
"loadFailed": "Failed to load settings.", },
"nonJsonResponse": "Unexpected non-JSON response from server (status: {status}).", "settings": {
"save": "Save settings", "loadFailed": "Failed to load settings.",
"saving": "Saving...", "nonJsonResponse": "Unexpected non-JSON response from server (status: {status}).",
"saveSuccess": "Settings saved and hot reloaded.", "save": "Save settings",
"saveFailed": "Failed to save settings.", "saving": "Saving...",
"securityTitle": "Security", "saveSuccess": "Settings saved and hot reloaded.",
"jwtExpireHours": "JWT expiry (hours)", "saveFailed": "Failed to save settings.",
"newPassword": "New admin password", "securityTitle": "Security",
"newPasswordPlaceholder": "Enter new password (min 4 chars)", "jwtExpireHours": "JWT expiry (hours)",
"updatePassword": "Update password", "newPassword": "New admin password",
"updating": "Updating...", "newPasswordPlaceholder": "Enter new password (min 4 chars)",
"passwordTooShort": "Password must be at least 4 characters.", "updatePassword": "Update password",
"passwordUpdated": "Password updated. Please sign in again.", "updating": "Updating...",
"passwordUpdateFailed": "Failed to update password.", "passwordTooShort": "Password must be at least 4 characters.",
"runtimeTitle": "Concurrency & Queue", "passwordUpdated": "Password updated. Please sign in again.",
"accountMaxInflight": "Per-account max inflight", "passwordUpdateFailed": "Failed to update password.",
"accountMaxQueue": "Account max queue size", "runtimeTitle": "Concurrency & Queue",
"globalMaxInflight": "Global max inflight", "accountMaxInflight": "Per-account max inflight",
"behaviorTitle": "Behavior", "accountMaxQueue": "Account max queue size",
"toolcallMode": "Toolcall mode", "globalMaxInflight": "Global max inflight",
"earlyEmitConfidence": "Early emit confidence", "behaviorTitle": "Behavior",
"responsesTTL": "Responses store TTL (seconds)", "toolcallMode": "Toolcall mode",
"embeddingsProvider": "Embeddings provider", "earlyEmitConfidence": "Early emit confidence",
"modelTitle": "Model mapping", "responsesTTL": "Responses store TTL (seconds)",
"claudeMapping": "Claude mapping (JSON)", "embeddingsProvider": "Embeddings provider",
"modelAliases": "Model aliases (JSON)", "modelTitle": "Model mapping",
"backupTitle": "Backup & Restore", "claudeMapping": "Claude mapping (JSON)",
"loadExport": "Load current export", "modelAliases": "Model aliases (JSON)",
"importModeMerge": "Merge import (default)", "backupTitle": "Backup & Restore",
"importModeReplace": "Replace all import", "loadExport": "Load current export",
"importNow": "Import now", "importModeMerge": "Merge import (default)",
"importing": "Importing...", "importModeReplace": "Replace all import",
"importPlaceholder": "Paste config JSON to import", "importNow": "Import now",
"importEmpty": "Please input import JSON.", "importing": "Importing...",
"importInvalidJson": "Import JSON is invalid.", "importPlaceholder": "Paste config JSON to import",
"importFailed": "Import failed.", "importEmpty": "Please input import JSON.",
"importSuccess": "Config imported (mode: {mode}).", "importInvalidJson": "Import JSON is invalid.",
"exportFailed": "Export failed.", "importFailed": "Import failed.",
"exportLoaded": "Current export loaded.", "importSuccess": "Config imported (mode: {mode}).",
"exportJson": "Export JSON", "exportFailed": "Export failed.",
"invalidJsonField": "{field} is not a valid JSON object.", "exportLoaded": "Current export loaded.",
"defaultPasswordWarning": "You are using the default admin password \"admin\". Please change it.", "exportJson": "Export JSON",
"vercelSyncHint": "Configuration changed. For Vercel deployments, sync manually in Vercel Sync and redeploy.", "invalidJsonField": "{field} is not a valid JSON object.",
"autoFetchPaused": "Auto loading paused after {count} failures: {error}", "defaultPasswordWarning": "You are using the default admin password \"admin\". Please change it.",
"retryLoad": "Retry now" "vercelSyncHint": "Configuration changed. For Vercel deployments, sync manually in Vercel Sync and redeploy.",
}, "autoFetchPaused": "Auto loading paused after {count} failures: {error}",
"login": { "retryLoad": "Retry now"
"welcome": "Welcome back", },
"subtitle": "Enter your admin key to continue", "login": {
"adminKeyLabel": "Admin key", "welcome": "Welcome back",
"adminKeyPlaceholder": "Enter your admin key...", "subtitle": "Enter your admin key to continue",
"rememberSession": "Remember this session", "adminKeyLabel": "Admin key",
"signIn": "Sign in", "adminKeyPlaceholder": "Enter your admin key...",
"secureConnection": "Secure connection", "rememberSession": "Remember this session",
"adminPortal": "DS2API admin portal", "signIn": "Sign in",
"signInFailed": "Sign-in failed.", "secureConnection": "Secure connection",
"networkError": "Network error: {error}" "adminPortal": "DS2API admin portal",
}, "signInFailed": "Sign-in failed.",
"vercel": { "networkError": "Network error: {error}"
"tokenRequired": "Vercel access token is required.", },
"projectRequired": "Project ID is required.", "vercel": {
"syncFailed": "Sync failed.", "tokenRequired": "Vercel access token is required.",
"networkError": "Network error.", "projectRequired": "Project ID is required.",
"title": "Vercel Deployment", "syncFailed": "Sync failed.",
"description": "Sync the current keys and accounts directly to Vercel environment variables.", "networkError": "Network error.",
"tokenLabel": "Vercel Access Token", "title": "Vercel Deployment",
"getToken": "Get token", "description": "Sync the current keys and accounts directly to Vercel environment variables.",
"tokenPlaceholderPreconfig": "Using preconfigured token", "tokenLabel": "Vercel Access Token",
"tokenPlaceholder": "Enter Vercel access token", "getToken": "Get token",
"projectIdLabel": "Project ID", "tokenPlaceholderPreconfig": "Using preconfigured token",
"projectIdHint": "Find it in Project Settings → General.", "tokenPlaceholder": "Enter Vercel access token",
"teamIdLabel": "Team ID", "projectIdLabel": "Project ID",
"optional": "optional", "projectIdHint": "Find it in Project Settings → General.",
"syncing": "Syncing...", "teamIdLabel": "Team ID",
"syncRedeploy": "Sync & redeploy", "optional": "optional",
"redeployHint": "This triggers a Vercel redeploy and usually takes 3060 seconds.", "syncing": "Syncing...",
"syncSucceeded": "Sync succeeded", "syncRedeploy": "Sync & redeploy",
"syncFailedLabel": "Sync failed", "redeployHint": "This triggers a Vercel redeploy and usually takes 3060 seconds.",
"openDeployment": "Open deployment", "syncSucceeded": "Sync succeeded",
"statusSynced": "Synced", "syncFailedLabel": "Sync failed",
"statusNotSynced": "Not synced", "openDeployment": "Open deployment",
"statusNeverSynced": "Never synced", "statusSynced": "Synced",
"lastSyncTime": "Last sync: {time}", "statusNotSynced": "Not synced",
"pollPaused": "Status polling paused after {count} failures.", "statusNeverSynced": "Never synced",
"manualRefresh": "Refresh manually", "lastSyncTime": "Last sync: {time}",
"howItWorks": "How it works", "pollPaused": "Status polling paused after {count} failures.",
"steps": { "manualRefresh": "Refresh manually",
"one": "The current configuration (keys and accounts) is exported as JSON.", "howItWorks": "How it works",
"two": "The JSON is Base64-encoded for safe formatting.", "steps": {
"three": "Update the env var in Vercel:", "one": "The current configuration (keys and accounts) is exported as JSON.",
"four": "Trigger a redeploy to apply the updated environment variables." "two": "The JSON is Base64-encoded for safe formatting.",
} "three": "Update the env var in Vercel:",
} "four": "Trigger a redeploy to apply the updated environment variables."
} }
}
}

View File

@@ -1,295 +1,297 @@
{ {
"language": { "language": {
"label": "语言", "label": "语言",
"english": "English", "english": "English",
"chinese": "中文" "chinese": "中文"
}, },
"nav": { "nav": {
"accounts": { "accounts": {
"label": "账号管理", "label": "账号管理",
"desc": "管理 DeepSeek 账号池" "desc": "管理 DeepSeek 账号池"
}, },
"test": { "test": {
"label": "API 测试", "label": "API 测试",
"desc": "测试 API 连接与响应" "desc": "测试 API 连接与响应"
}, },
"import": { "import": {
"label": "批量导入", "label": "批量导入",
"desc": "批量导入账号配置" "desc": "批量导入账号配置"
}, },
"vercel": { "vercel": {
"label": "Vercel 同步", "label": "Vercel 同步",
"desc": "同步配置到 Vercel" "desc": "同步配置到 Vercel"
}, },
"settings": { "settings": {
"label": "设置中心", "label": "设置中心",
"desc": "在线修改系统设置与配置" "desc": "在线修改系统设置与配置"
} }
}, },
"sidebar": { "sidebar": {
"onlineAdminConsole": "在线管理面板", "onlineAdminConsole": "在线管理面板",
"systemStatus": "系统状态", "systemStatus": "系统状态",
"statusOnline": "在线", "statusOnline": "在线",
"accounts": "账号", "accounts": "账号",
"keys": "密钥", "keys": "密钥",
"signOut": "退出登录" "signOut": "退出登录"
}, },
"auth": { "auth": {
"expired": "认证已过期,请重新登录", "expired": "认证已过期,请重新登录",
"checking": "正在检查登录状态..." "checking": "正在检查登录状态..."
}, },
"errors": { "errors": {
"fetchConfig": "获取配置失败: {error}" "fetchConfig": "获取配置失败: {error}"
}, },
"actions": { "actions": {
"cancel": "取消", "cancel": "取消",
"add": "添加", "add": "添加",
"delete": "删除", "delete": "删除",
"copy": "复制", "copy": "复制",
"generate": "生成", "generate": "生成",
"test": "测试", "test": "测试",
"testing": "正在测试...", "testing": "正在测试...",
"loading": "加载中..." "loading": "加载中..."
}, },
"messages": { "messages": {
"deleted": "删除成功", "deleted": "删除成功",
"deleteFailed": "删除失败", "deleteFailed": "删除失败",
"failedToAdd": "添加失败", "failedToAdd": "添加失败",
"networkError": "网络错误", "networkError": "网络错误",
"requestFailed": "请求失败", "requestFailed": "请求失败",
"generationStopped": "已停止生成", "generationStopped": "已停止生成",
"invalidJson": "无效的 JSON 格式", "invalidJson": "无效的 JSON 格式",
"importFailed": "导入失败", "importFailed": "导入失败",
"copyFailed": "复制失败" "copyFailed": "复制失败"
}, },
"landing": { "landing": {
"adminConsole": "管理面板", "adminConsole": "管理面板",
"apiStatus": "API 状态", "apiStatus": "API 状态",
"features": { "features": {
"compatibility": { "compatibility": {
"title": "全面兼容", "title": "全面兼容",
"desc": "适配 OpenAI 与 Claude 格式" "desc": "适配 OpenAI 与 Claude 格式"
}, },
"loadBalancing": { "loadBalancing": {
"title": "负载均衡", "title": "负载均衡",
"desc": "智能轮询,稳定高效" "desc": "智能轮询,稳定高效"
}, },
"reasoning": { "reasoning": {
"title": "深度思考", "title": "深度思考",
"desc": "支持推理过程输出" "desc": "支持推理过程输出"
}, },
"search": { "search": {
"title": "联网搜索", "title": "联网搜索",
"desc": "集成原生网页搜索能力" "desc": "集成原生网页搜索能力"
} }
} }
}, },
"accountManager": { "accountManager": {
"addKeySuccess": "API 密钥添加成功", "addKeySuccess": "API 密钥添加成功",
"addAccountSuccess": "账号添加成功", "addAccountSuccess": "账号添加成功",
"requiredFields": "需要填写密码以及邮箱或手机号", "requiredFields": "需要填写密码以及邮箱或手机号",
"deleteKeyConfirm": "确定要删除此 API 密钥吗?", "deleteKeyConfirm": "确定要删除此 API 密钥吗?",
"deleteAccountConfirm": "确定要删除此账号吗?", "deleteAccountConfirm": "确定要删除此账号吗?",
"invalidIdentifier": "账号标识无效,无法执行操作", "invalidIdentifier": "账号标识无效,无法执行操作",
"testAllConfirm": "测试所有账号的 API 连通性?", "testAllConfirm": "测试所有账号的 API 连通性?",
"testAllCompleted": "完成:{success}/{total} 可用", "testAllCompleted": "完成:{success}/{total} 可用",
"testFailed": "测试失败: {error}", "testFailed": "测试失败: {error}",
"available": "可用", "available": "可用",
"inUse": "正在使用", "inUse": "正在使用",
"totalPool": "账号池总数", "totalPool": "账号池总数",
"accountsUnit": "个账号", "accountsUnit": "个账号",
"threadsUnit": "线程", "threadsUnit": "线程",
"apiKeysTitle": "API 密钥", "apiKeysTitle": "API 密钥",
"apiKeysDesc": "管理 API 访问密钥池", "apiKeysDesc": "管理 API 访问密钥池",
"addKey": "添加密钥", "addKey": "添加密钥",
"copied": "已复制", "copied": "已复制",
"copyKeyTitle": "复制密钥", "copyKeyTitle": "复制密钥",
"deleteKeyTitle": "删除密钥", "deleteKeyTitle": "删除密钥",
"noApiKeys": "未找到 API 密钥", "noApiKeys": "未找到 API 密钥",
"accountsTitle": "DeepSeek 账号", "accountsTitle": "DeepSeek 账号",
"accountsDesc": "管理 DeepSeek 账号池", "accountsDesc": "管理 DeepSeek 账号池",
"testAll": "测试全部", "testAll": "测试全部",
"addAccount": "添加账号", "addAccount": "添加账号",
"testingAllAccounts": "正在测试所有账号...", "testingAllAccounts": "正在测试所有账号...",
"sessionActive": "已建立会话", "sessionActive": "已建立会话",
"reauthRequired": "需重新登录", "reauthRequired": "需重新登录",
"testStatusFailed": "上次测试失败", "testStatusFailed": "上次测试失败",
"noAccounts": "未找到任何账号", "noAccounts": "未找到任何账号",
"modalAddKeyTitle": "添加 API 密钥", "modalAddKeyTitle": "添加 API 密钥",
"newKeyLabel": "新密钥值", "newKeyLabel": "新密钥值",
"newKeyPlaceholder": "输入自定义 API 密钥", "newKeyPlaceholder": "输入自定义 API 密钥",
"generate": "生成", "generate": "生成",
"generateHint": "点击「生成」自动创建随机密钥", "generateHint": "点击「生成」自动创建随机密钥",
"addKeyLoading": "添加中...", "addKeyLoading": "添加中...",
"addKeyAction": "添加密钥", "addKeyAction": "添加密钥",
"modalAddAccountTitle": "添加 DeepSeek 账号", "modalAddAccountTitle": "添加 DeepSeek 账号",
"emailOptional": "邮箱 (可选)", "emailOptional": "邮箱 (可选)",
"mobileOptional": "手机号 (可选)", "mobileOptional": "手机号 (可选)",
"passwordLabel": "密码", "passwordLabel": "密码",
"passwordPlaceholder": "账号密码", "passwordPlaceholder": "账号密码",
"addAccountLoading": "添加中...", "addAccountLoading": "添加中...",
"addAccountAction": "添加账号", "addAccountAction": "添加账号",
"pageInfo": "第 {current}/{total} 页,共 {count} 个账号" "pageInfo": "第 {current}/{total} 页,共 {count} 个账号",
}, "searchPlaceholder": "搜索账号...",
"apiTester": { "searchNoResults": "未找到匹配的账号"
"defaultMessage": "你好,请用一句话介绍你自己。", },
"models": { "apiTester": {
"chat": "非思考模型", "defaultMessage": "你好,请用一句话介绍你自己。",
"reasoner": "思考模型", "models": {
"chatSearch": "非思考模型 (带搜索)", "chat": "非思考模型",
"reasonerSearch": "思考模型 (带搜索)" "reasoner": "思考模型",
}, "chatSearch": "非思考模型 (带搜索)",
"missingApiKey": "请提供 API 密钥", "reasonerSearch": "思考模型 (带搜索)"
"requestFailed": "请求失败", },
"networkError": "网络错误: {error}", "missingApiKey": "请提供 API 密钥",
"testSuccess": "{account}: 测试成功 ({time}ms)", "requestFailed": "请求失败",
"config": "配置", "networkError": "网络错误: {error}",
"modelLabel": "模型", "testSuccess": "{account}: 测试成功 ({time}ms)",
"streamMode": "流式模式", "config": "配置",
"accountSelector": "选择账号", "modelLabel": "模型",
"autoRandom": "🤖 自动 / 随机", "streamMode": "流式模式",
"apiKeyOptional": "API 密钥 (可选)", "accountSelector": "选择账号",
"apiKeyDefault": "默认: ...{suffix}", "autoRandom": "🤖 自动 / 随机",
"apiKeyPlaceholder": "输入自定义密钥", "apiKeyOptional": "API 密钥 (可选)",
"modeManaged": "当前使用托管 key 模式(会走账号池)。", "apiKeyDefault": "默认: ...{suffix}",
"modeDirect": "当前使用直通 token 模式(需填写有效 DeepSeek token", "apiKeyPlaceholder": "输入自定义密钥",
"statusError": "错误", "modeManaged": "当前使用托管 key 模式(会走账号池)。",
"reasoningTrace": "思维链过程", "modeDirect": "当前使用直通 token 模式(需填写有效 DeepSeek token",
"generating": "正在生成响应...", "statusError": "错误",
"enterMessage": "输入消息...", "reasoningTrace": "思维链过程",
"adminConsoleLabel": "DeepSeek 管理员界面" "generating": "正在生成响应...",
}, "enterMessage": "输入消息...",
"batchImport": { "adminConsoleLabel": "DeepSeek 管理员界面"
"templates": { },
"full": { "batchImport": {
"name": "全量配置模板", "templates": {
"desc": "包含密钥、账号及模型映射" "full": {
}, "name": "全量配置模板",
"emailOnly": { "desc": "包含密钥、账号及模型映射"
"name": "仅邮箱账号", },
"desc": "批量导入邮箱格式账号" "emailOnly": {
}, "name": "仅邮箱账号",
"mobileOnly": { "desc": "批量导入邮箱格式账号"
"name": "仅手机号账号", },
"desc": "批量导入手机号格式账号" "mobileOnly": {
}, "name": "仅手机号账号",
"keysOnly": { "desc": "批量导入手机号格式账号"
"name": "仅 API 密钥", },
"desc": "仅添加 API 访问密钥" "keysOnly": {
} "name": "仅 API 密钥",
}, "desc": "仅添加 API 访问密钥"
"enterJson": "请输入 JSON 配置内容", }
"importSuccess": "导入成功: {keys} 个密钥, {accounts} 个账号", },
"templateLoaded": "已加载模板: {name}", "enterJson": "请输入 JSON 配置内容",
"currentConfigLoaded": "当前配置已加载", "importSuccess": "导入成功: {keys} 个密钥, {accounts} 个账号",
"fetchConfigFailed": "获取配置失败", "templateLoaded": "已加载模板: {name}",
"copySuccess": "Base64 配置已复制到剪贴板", "currentConfigLoaded": "当前配置已加载",
"quickTemplates": "快速模板", "fetchConfigFailed": "获取配置失败",
"dataExport": "数据导出", "copySuccess": "Base64 配置已复制到剪贴板",
"dataExportDesc": "获取配置的 Base64 字符串,用于 Vercel 环境变量。", "quickTemplates": "快速模板",
"copyBase64": "复制 Base64 配置", "dataExport": "数据导出",
"copied": "已复制", "dataExportDesc": "获取配置的 Base64 字符串,用于 Vercel 环境变量。",
"variableName": "变量名", "copyBase64": "复制 Base64 配置",
"jsonEditor": "JSON 编辑器", "copied": "已复制",
"loadCurrentConfig": "加载当前配置", "variableName": "变量名",
"applyConfig": "应用配置", "jsonEditor": "JSON 编辑器",
"importing": "正在导入...", "loadCurrentConfig": "加载当前配置",
"importComplete": "导入操作已完成", "applyConfig": "应用配置",
"importSummary": "成功导入了 {keys} 个 API 密钥,并更新了 {accounts} 个账号。" "importing": "正在导入...",
}, "importComplete": "导入操作已完成",
"settings": { "importSummary": "成功导入了 {keys} 个 API 密钥,并更新了 {accounts} 个账号。"
"loadFailed": "加载设置失败", },
"nonJsonResponse": "服务端返回了非 JSON 响应(状态码:{status}", "settings": {
"save": "保存设置", "loadFailed": "加载设置失败",
"saving": "保存中...", "nonJsonResponse": "服务端返回了非 JSON 响应(状态码:{status}",
"saveSuccess": "设置已保存并热更新生效", "save": "保存设置",
"saveFailed": "保存设置失败", "saving": "保存中...",
"securityTitle": "安全设置", "saveSuccess": "设置已保存并热更新生效",
"jwtExpireHours": "JWT 有效期(小时)", "saveFailed": "保存设置失败",
"newPassword": "面板新密码", "securityTitle": "安全设置",
"newPasswordPlaceholder": "输入新密码(至少 4 位", "jwtExpireHours": "JWT 有效期(小时",
"updatePassword": "修改密码", "newPassword": "面板新密码",
"updating": "更新中...", "newPasswordPlaceholder": "输入新密码(至少 4 位)",
"passwordTooShort": "新密码至少 4 位", "updatePassword": "修改密码",
"passwordUpdated": "密码已更新,需重新登录", "updating": "更新中...",
"passwordUpdateFailed": "密码更新失败", "passwordTooShort": "密码至少 4 位",
"runtimeTitle": "并发与队列", "passwordUpdated": "密码已更新,需重新登录",
"accountMaxInflight": "每账号并发上限", "passwordUpdateFailed": "密码更新失败",
"accountMaxQueue": "账号等待队列上限", "runtimeTitle": "并发与队列",
"globalMaxInflight": "全局并发上限", "accountMaxInflight": "每账号并发上限",
"behaviorTitle": "行为设置", "accountMaxQueue": "账号等待队列上限",
"toolcallMode": "Toolcall 模式", "globalMaxInflight": "全局并发上限",
"earlyEmitConfidence": "早发置信度", "behaviorTitle": "行为设置",
"responsesTTL": "Responses 缓存 TTL", "toolcallMode": "Toolcall 模式",
"embeddingsProvider": "Embeddings Provider", "earlyEmitConfidence": "早发置信度",
"modelTitle": "模型映射", "responsesTTL": "Responses 缓存 TTL",
"claudeMapping": "Claude 映射JSON", "embeddingsProvider": "Embeddings Provider",
"modelAliases": "模型别名JSON", "modelTitle": "模型映射",
"backupTitle": "备份与恢复", "claudeMapping": "Claude 映射JSON",
"loadExport": "加载当前导出", "modelAliases": "模型别名JSON",
"importModeMerge": "合并导入(默认)", "backupTitle": "备份与恢复",
"importModeReplace": "全量覆盖导入", "loadExport": "加载当前导出",
"importNow": "立即导入", "importModeMerge": "合并导入(默认)",
"importing": "导入中...", "importModeReplace": "全量覆盖导入",
"importPlaceholder": "粘贴要导入的 JSON 配置", "importNow": "立即导入",
"importEmpty": "请先输入导入 JSON", "importing": "导入中...",
"importInvalidJson": "导入 JSON 格式无效", "importPlaceholder": "粘贴要导入 JSON 配置",
"importFailed": "导入失败", "importEmpty": "请先输入导入 JSON",
"importSuccess": "配置导入成功(模式:{mode}", "importInvalidJson": "导入 JSON 格式无效",
"exportFailed": "导失败", "importFailed": "导失败",
"exportLoaded": "已加载当前配置导出", "importSuccess": "配置导入成功(模式:{mode}",
"exportJson": "导出 JSON", "exportFailed": "导出失败",
"invalidJsonField": "{field} 不是有效 JSON 对象", "exportLoaded": "已加载当前配置导出",
"defaultPasswordWarning": "当前使用默认密码 admin请尽快在此修改。", "exportJson": "导出 JSON",
"vercelSyncHint": "当前配置已更新。Vercel 部署请到 Vercel 同步页面手动同步并重部署。", "invalidJsonField": "{field} 不是有效 JSON 对象",
"autoFetchPaused": "自动加载已暂停:连续失败 {count} 次({error}", "defaultPasswordWarning": "当前使用默认密码 admin请尽快在此修改。",
"retryLoad": "立即重试" "vercelSyncHint": "当前配置已更新。Vercel 部署请到 Vercel 同步页面手动同步并重部署。",
}, "autoFetchPaused": "自动加载已暂停:连续失败 {count} 次({error}",
"login": { "retryLoad": "立即重试"
"welcome": "欢迎回来", },
"subtitle": "请输入管理员密钥以继续", "login": {
"adminKeyLabel": "管理员密钥", "welcome": "欢迎回来",
"adminKeyPlaceholder": "输入您的管理员密钥...", "subtitle": "输入管理员密钥以继续",
"rememberSession": "记住登录状态", "adminKeyLabel": "管理员密钥",
"signIn": "登录", "adminKeyPlaceholder": "输入您的管理员密钥...",
"secureConnection": "安全连接", "rememberSession": "记住登录状态",
"adminPortal": "DS2API 管理员门户", "signIn": "登录",
"signInFailed": "登录失败", "secureConnection": "安全连接",
"networkError": "网络错误: {error}" "adminPortal": "DS2API 管理员门户",
}, "signInFailed": "登录失败",
"vercel": { "networkError": "网络错误: {error}"
"tokenRequired": "需要 Vercel 访问令牌", },
"projectRequired": "需要项目 ID", "vercel": {
"syncFailed": "同步失败", "tokenRequired": "需要 Vercel 访问令牌",
"networkError": "网络错误", "projectRequired": "需要项目 ID",
"title": "Vercel 部署", "syncFailed": "同步失败",
"description": "将当前密钥和账号配置直接同步到 Vercel 环境变量中。", "networkError": "网络错误",
"tokenLabel": "Vercel 访问令牌", "title": "Vercel 部署",
"getToken": "获取令牌", "description": "将当前密钥和账号配置直接同步到 Vercel 环境变量中。",
"tokenPlaceholderPreconfig": "正在使用预配置的令牌", "tokenLabel": "Vercel 访问令牌",
"tokenPlaceholder": "输入 Vercel 访问令牌", "getToken": "获取令牌",
"projectIdLabel": "项目 ID", "tokenPlaceholderPreconfig": "正在使用预配置的令牌",
"projectIdHint": "可在项目设置 (Project Settings) → 常规 (General) 中找到", "tokenPlaceholder": "输入 Vercel 访问令牌",
"teamIdLabel": "团队 ID", "projectIdLabel": "项目 ID",
"optional": "可选", "projectIdHint": "可在项目设置 (Project Settings) → 常规 (General) 中找到",
"syncing": "正在同步...", "teamIdLabel": "团队 ID",
"syncRedeploy": "同步并重新部署", "optional": "可选",
"redeployHint": "这将触发 Vercel 的重新部署,大约需要 30-60 秒。", "syncing": "正在同步...",
"syncSucceeded": "同步成功", "syncRedeploy": "同步并重新部署",
"syncFailedLabel": "同步失败", "redeployHint": "这将触发 Vercel 的重新部署,大约需要 30-60 秒。",
"openDeployment": "访问部署地址", "syncSucceeded": "同步成功",
"statusSynced": "同步", "syncFailedLabel": "同步失败",
"statusNotSynced": "未同步", "openDeployment": "访问部署地址",
"statusNeverSynced": "从未同步", "statusSynced": "同步",
"lastSyncTime": "上次同步: {time}", "statusNotSynced": "未同步",
"pollPaused": "状态轮询已暂停:连续失败 {count} 次。", "statusNeverSynced": "从未同步",
"manualRefresh": "手动刷新", "lastSyncTime": "上次同步: {time}",
"howItWorks": "工作原理", "pollPaused": "状态轮询已暂停:连续失败 {count} 次。",
"steps": { "manualRefresh": "手动刷新",
"one": "当前配置 (密钥和账号) 被导出为 JSON 字符串。", "howItWorks": "工作原理",
"two": "JSON 被编码为 Base64 以确保格式兼容性。", "steps": {
"three": "更新 Vercel 项目中的环境变量:", "one": "当前配置 (密钥和账号) 被导出为 JSON 字符串。",
"four": "触发重新部署以应用新的环境变量。" "two": "JSON 被编码为 Base64 以确保格式兼容性。",
} "three": "更新 Vercel 项目中的环境变量:",
} "four": "触发重新部署以应用新的环境变量。"
} }
}
}