mirror of
https://github.com/CJackHwang/ds2api.git
synced 2026-05-03 16:05:26 +08:00
Compare commits
190 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c32fe30239 | ||
|
|
03b2acfc9f | ||
|
|
a299c7d1c4 | ||
|
|
51d3578465 | ||
|
|
072ec57acd | ||
|
|
545ab0802f | ||
|
|
a7522b4188 | ||
|
|
1286b02247 | ||
|
|
2f7cb473fc | ||
|
|
ad80a57efa | ||
|
|
5f110e6910 | ||
|
|
7c0bc9ec0f | ||
|
|
a901250de7 | ||
|
|
dc5bffdf89 | ||
|
|
eccd8c957b | ||
|
|
b1d0ee07c0 | ||
|
|
0156f6b45b | ||
|
|
a9f46f5b25 | ||
|
|
e7d6807c7c | ||
|
|
d407ccb773 | ||
|
|
c8f7b6b371 | ||
|
|
20d71f528a | ||
|
|
36d0239dc6 | ||
|
|
e620752e2b | ||
|
|
44cb27872c | ||
|
|
3d52040b3b | ||
|
|
049e40e5f1 | ||
|
|
217e0e9f01 | ||
|
|
b3c54fcf3d | ||
|
|
1c38709d32 | ||
|
|
28e800c670 | ||
|
|
4389e02b29 | ||
|
|
e2756f800d | ||
|
|
55abf64717 | ||
|
|
76ee2faa12 | ||
|
|
9c3696194c | ||
|
|
0bca6e2cee | ||
|
|
934b40e572 | ||
|
|
dd5a0c5213 | ||
|
|
43402e7a26 | ||
|
|
6373c001f5 | ||
|
|
3430322e81 | ||
|
|
df1cfac9bc | ||
|
|
706e68de23 | ||
|
|
83b4c7bcad | ||
|
|
603801c542 | ||
|
|
445c95a4f2 | ||
|
|
0a6ef8e3f2 | ||
|
|
fd0ec29991 | ||
|
|
febd3ec83a | ||
|
|
2671298439 | ||
|
|
92e321fe2c | ||
|
|
fca8c01397 | ||
|
|
667e1e3710 | ||
|
|
4438d03c5c | ||
|
|
95b7665643 | ||
|
|
9896b1fc33 | ||
|
|
966f21211d | ||
|
|
7dc3af40b2 | ||
|
|
2f6b5ffda0 | ||
|
|
85e256ad4d | ||
|
|
7c3ff6ee7e | ||
|
|
63e62fd1b0 | ||
|
|
483d7af3d2 | ||
|
|
0f89823526 | ||
|
|
6a778e0d35 | ||
|
|
9035c350a7 | ||
|
|
ba80052a26 | ||
|
|
78fdd63470 | ||
|
|
4b4f097006 | ||
|
|
d3018c281b | ||
|
|
415a2359ad | ||
|
|
f702d45a24 | ||
|
|
90817cb9e2 | ||
|
|
b96f736bd2 | ||
|
|
8ab028c52a | ||
|
|
78366afec5 | ||
|
|
bd41c8a90c | ||
|
|
bc2a78ae29 | ||
|
|
192cdf8562 | ||
|
|
94c1acace5 | ||
|
|
273c18ba0f | ||
|
|
ae28e33184 | ||
|
|
0438ce9a12 | ||
|
|
af4a067dab | ||
|
|
33f6fef015 | ||
|
|
6d3979a1d6 | ||
|
|
c8922c7a88 | ||
|
|
241334c658 | ||
|
|
d7e071b24a | ||
|
|
89225c778e | ||
|
|
22160de2c4 | ||
|
|
0cbc2c875d | ||
|
|
a0984ef682 | ||
|
|
babfa973d6 | ||
|
|
ba4071d8b5 | ||
|
|
e1f8e493d2 | ||
|
|
907104a735 | ||
|
|
2c8409dcbb | ||
|
|
5c23261932 | ||
|
|
d7125ea106 | ||
|
|
929d9a8ef7 | ||
|
|
c03f733b83 | ||
|
|
047fc9bee2 | ||
|
|
52558838ef | ||
|
|
f1926a6ced | ||
|
|
6e21714e23 | ||
|
|
48c4f0df9f | ||
|
|
a550de30af | ||
|
|
23422e4a8e | ||
|
|
9c33bed403 | ||
|
|
c81294f1b7 | ||
|
|
28d2b0410f | ||
|
|
0c782407f5 | ||
|
|
27eb73d48b | ||
|
|
685b5011e4 | ||
|
|
15e9eb3639 | ||
|
|
f18e6b9b11 | ||
|
|
40ebc8e942 | ||
|
|
fa3e6d040d | ||
|
|
458e4469e5 | ||
|
|
72c8e7e9f9 | ||
|
|
b9c8e90d98 | ||
|
|
36fcba1280 | ||
|
|
801b5abce3 | ||
|
|
206c3d5479 | ||
|
|
b2903c35ed | ||
|
|
b26dc8b7de | ||
|
|
63271aea8c | ||
|
|
516da04bcd | ||
|
|
9f7b671e5e | ||
|
|
d3c0e747a4 | ||
|
|
d40888496e | ||
|
|
28bb85ad63 | ||
|
|
1e9170e385 | ||
|
|
9e4c5eff7b | ||
|
|
b82bc1311a | ||
|
|
fb43bd92f5 | ||
|
|
0378d8c0a9 | ||
|
|
2d5d211a7a | ||
|
|
70467054c3 | ||
|
|
6959aa2982 | ||
|
|
1602c3a43c | ||
|
|
a13293e113 | ||
|
|
90ce595325 | ||
|
|
40d5e3ebb5 | ||
|
|
645fce41c8 | ||
|
|
9360397197 | ||
|
|
162920f5d5 | ||
|
|
4048c3784b | ||
|
|
a505f2cb96 | ||
|
|
e2dfe15f48 | ||
|
|
22e951b4c4 | ||
|
|
c09a4b51a5 | ||
|
|
3627c7366d | ||
|
|
87e1b05e8e | ||
|
|
f6df01d3aa | ||
|
|
0fb1bc6611 | ||
|
|
0bfddf7943 | ||
|
|
2adbdd069c | ||
|
|
40b8182984 | ||
|
|
66c2944be2 | ||
|
|
193351ac19 | ||
|
|
a3b21c6b76 | ||
|
|
573c717a5d | ||
|
|
40c61949e8 | ||
|
|
7bff2c1bab | ||
|
|
4c83f36089 | ||
|
|
abc96a37d8 | ||
|
|
8a91fef6ab | ||
|
|
df61f06d9a | ||
|
|
7475defeca | ||
|
|
f13ad231ac | ||
|
|
1b0e8cbadb | ||
|
|
a44afb335a | ||
|
|
f1ba805173 | ||
|
|
131ca7d398 | ||
|
|
ed9efc5858 | ||
|
|
603e309721 | ||
|
|
c4cdce46c2 | ||
|
|
de9d128545 | ||
|
|
e4a4b0ac0b | ||
|
|
22e3f32c43 | ||
|
|
9a24b8dcc2 | ||
|
|
68ccbd3785 | ||
|
|
845fc1453e | ||
|
|
fe486d0078 | ||
|
|
d5c186b312 | ||
|
|
4cec942fff | ||
|
|
9a404e75fc |
105
.github/workflows/quality-gates.yml
vendored
105
.github/workflows/quality-gates.yml
vendored
@@ -5,12 +5,23 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: quality-gates-${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.26.x"
|
||||
NODE_VERSION: "24"
|
||||
GOLANGCI_LINT_VERSION: "v2.11.4"
|
||||
|
||||
jobs:
|
||||
quality-gates:
|
||||
lint-and-refactor:
|
||||
name: Lint and Refactor Gate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
@@ -19,19 +30,13 @@ jobs:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.26.x"
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "24"
|
||||
cache: "npm"
|
||||
cache-dependency-path: webui/package-lock.json
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: go.sum
|
||||
|
||||
- name: Setup golangci-lint
|
||||
uses: golangci/golangci-lint-action@v8
|
||||
with:
|
||||
version: v2.11.4
|
||||
version: ${{ env.GOLANGCI_LINT_VERSION }}
|
||||
install-mode: binary
|
||||
verify: true
|
||||
|
||||
@@ -41,10 +46,88 @@ jobs:
|
||||
- name: Refactor Line Gate
|
||||
run: ./tests/scripts/check-refactor-line-gate.sh
|
||||
|
||||
go-unit:
|
||||
name: Go Unit (${{ matrix.os }})
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- macos-latest
|
||||
- windows-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: go.sum
|
||||
|
||||
- name: Go Unit Gate
|
||||
run: ./tests/scripts/run-unit-go.sh
|
||||
|
||||
unit-all:
|
||||
name: Unit Gates (Go + Node)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: go.sum
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: ${{ env.NODE_VERSION }}
|
||||
cache: npm
|
||||
cache-dependency-path: webui/package-lock.json
|
||||
|
||||
- name: Unit Gates (Go + Node)
|
||||
run: ./tests/scripts/run-unit-all.sh
|
||||
|
||||
webui-build:
|
||||
name: WebUI Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: ${{ env.NODE_VERSION }}
|
||||
cache: npm
|
||||
cache-dependency-path: webui/package-lock.json
|
||||
|
||||
- name: WebUI Build Gate
|
||||
run: |
|
||||
npm ci --prefix webui
|
||||
npm ci --prefix webui --prefer-offline --no-audit
|
||||
npm run build --prefix webui
|
||||
|
||||
cross-build:
|
||||
name: Release Target Cross-Build
|
||||
if: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/dev' || github.ref == 'refs/heads/main') }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: go.sum
|
||||
|
||||
- name: Cross-Build Release Targets
|
||||
env:
|
||||
CROSS_BUILD_JOBS: "3"
|
||||
run: ./tests/scripts/check-cross-build.sh
|
||||
|
||||
65
.github/workflows/release-artifacts.yml
vendored
65
.github/workflows/release-artifacts.yml
vendored
@@ -15,6 +15,14 @@ permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
|
||||
concurrency:
|
||||
group: release-artifacts-${{ github.event.release.tag_name || github.event.inputs.release_tag }}
|
||||
cancel-in-progress: false
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.26.x"
|
||||
NODE_VERSION: "24"
|
||||
|
||||
jobs:
|
||||
build-and-upload:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -27,69 +35,30 @@ jobs:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.26.x"
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: go.sum
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: "24"
|
||||
node-version: ${{ env.NODE_VERSION }}
|
||||
cache: "npm"
|
||||
cache-dependency-path: webui/package-lock.json
|
||||
|
||||
- name: Release Blocking Gates
|
||||
run: |
|
||||
./tests/scripts/check-stage6-manual-smoke.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
|
||||
- name: Build WebUI
|
||||
run: |
|
||||
npm ci --prefix webui
|
||||
npm ci --prefix webui --prefer-offline --no-audit
|
||||
npm run build --prefix webui
|
||||
|
||||
- name: Build Multi-Platform Archives
|
||||
run: |
|
||||
set -euo pipefail
|
||||
TAG="${RELEASE_TAG}"
|
||||
BUILD_VERSION="${TAG}"
|
||||
if [ -z "${BUILD_VERSION}" ] && [ -f VERSION ]; then
|
||||
BUILD_VERSION="$(cat VERSION | tr -d '[:space:]')"
|
||||
fi
|
||||
mkdir -p dist
|
||||
|
||||
targets=(
|
||||
"linux/amd64"
|
||||
"linux/arm64"
|
||||
"darwin/amd64"
|
||||
"darwin/arm64"
|
||||
"windows/amd64"
|
||||
)
|
||||
|
||||
for target in "${targets[@]}"; do
|
||||
GOOS="${target%/*}"
|
||||
GOARCH="${target#*/}"
|
||||
PKG="ds2api_${TAG}_${GOOS}_${GOARCH}"
|
||||
STAGE="dist/${PKG}"
|
||||
BIN="ds2api"
|
||||
if [ "${GOOS}" = "windows" ]; then
|
||||
BIN="ds2api.exe"
|
||||
fi
|
||||
|
||||
mkdir -p "${STAGE}/static"
|
||||
CGO_ENABLED=0 GOOS="${GOOS}" GOARCH="${GOARCH}" \
|
||||
go build -trimpath -ldflags="-s -w -X ds2api/internal/version.BuildVersion=${BUILD_VERSION}" -o "${STAGE}/${BIN}" ./cmd/ds2api
|
||||
|
||||
cp config.example.json .env.example LICENSE README.MD README.en.md "${STAGE}/"
|
||||
cp -R static/admin "${STAGE}/static/admin"
|
||||
|
||||
if [ "${GOOS}" = "windows" ]; then
|
||||
(cd dist && zip -rq "${PKG}.zip" "${PKG}")
|
||||
else
|
||||
tar -C dist -czf "dist/${PKG}.tar.gz" "${PKG}"
|
||||
fi
|
||||
|
||||
rm -rf "${STAGE}"
|
||||
done
|
||||
env:
|
||||
RELEASE_BUILD_JOBS: "3"
|
||||
run: ./scripts/build-release-archives.sh
|
||||
|
||||
- name: Prepare Docker release inputs
|
||||
run: |
|
||||
@@ -153,6 +122,8 @@ jobs:
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.meta_release.outputs.tags }}
|
||||
labels: ${{ steps.meta_release.outputs.labels }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Export Docker image archives for release assets
|
||||
run: |
|
||||
@@ -162,12 +133,14 @@ jobs:
|
||||
docker buildx build \
|
||||
--platform linux/amd64 \
|
||||
--target runtime-from-dist \
|
||||
--cache-from type=gha \
|
||||
--output type=docker,dest="dist/ds2api_${TAG}_docker_linux_amd64.tar" \
|
||||
.
|
||||
|
||||
docker buildx build \
|
||||
--platform linux/arm64 \
|
||||
--target runtime-from-dist \
|
||||
--cache-from type=gha \
|
||||
--output type=docker,dest="dist/ds2api_${TAG}_docker_linux_arm64.tar" \
|
||||
.
|
||||
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -29,6 +29,7 @@ yarn.lock
|
||||
pnpm-lock.yaml
|
||||
|
||||
# Build artifacts
|
||||
dist/
|
||||
*.tsbuildinfo
|
||||
.cache/
|
||||
.parcel-cache/
|
||||
|
||||
@@ -21,3 +21,9 @@ These rules apply to all agent-made changes in this repository.
|
||||
|
||||
- Keep changes additive and tightly scoped to the requested feature or bugfix.
|
||||
- Do not mix unrelated refactors into feature PRs unless they are required to make the change pass gates.
|
||||
|
||||
## Documentation Sync
|
||||
|
||||
- When business logic or user-visible behavior changes, update the corresponding documentation in the same change.
|
||||
- `docs/prompt-compatibility.md` is the source-of-truth document for the “API -> pure-text web-chat context” compatibility flow.
|
||||
- If a change affects message normalization, tool prompt injection, prompt-visible tool history, file/reference handling, history split, or completion payload assembly, update `docs/prompt-compatibility.md` in the same change.
|
||||
|
||||
145
API.en.md
145
API.en.md
@@ -31,13 +31,15 @@ Docs: [Overview](README.en.md) / [Architecture](docs/ARCHITECTURE.en.md) / [Depl
|
||||
| Base URL | `http://localhost:5001` or your deployment domain |
|
||||
| Default Content-Type | `application/json` |
|
||||
| Health probes | `GET /healthz`, `GET /readyz` |
|
||||
| CORS | Enabled (`Access-Control-Allow-Origin: *`, allows `Content-Type`, `Authorization`, `X-API-Key`, `X-Ds2-Target-Account`, `X-Ds2-Source`, `X-Vercel-Protection-Bypass`) |
|
||||
| CORS | Enabled (uniformly covers `/v1/*`, `/anthropic/*`, `/v1beta/models/*`, and `/admin/*`; echoes the browser `Origin` when present, otherwise `*`; default allow-list includes `Content-Type`, `Authorization`, `X-API-Key`, `X-Ds2-Target-Account`, `X-Ds2-Source`, `X-Vercel-Protection-Bypass`, `X-Goog-Api-Key`, `Anthropic-Version`, `Anthropic-Beta`, and also accepts third-party preflight-requested headers such as `x-stainless-*`; `/v1/chat/completions` on Vercel Node Runtime matches the same behavior; internal-only `X-Ds2-Internal-Token` remains blocked) |
|
||||
|
||||
- All JSON request bodies must be valid UTF-8; malformed byte sequences are rejected on ingress with `400 invalid json`.
|
||||
|
||||
### 3.0 Adapter-Layer Notes
|
||||
|
||||
- OpenAI / Claude / Gemini protocols are now mounted on one shared `chi` router tree assembled in `internal/server/router.go`.
|
||||
- Adapter responsibilities are streamlined to: **request normalization → DeepSeek invocation → protocol-shaped rendering**, reducing legacy split-logic paths.
|
||||
- Tool-calling semantics are aligned between Go and Node runtime: parsing is now centered on XML/Markup-family tool syntax (`<tool_call>` / `<function_call>` / `<invoke>` / `tool_use` / antml variants), plus stream-time anti-leak filtering.
|
||||
- Tool-calling semantics are aligned between Go and Node runtime: models should output the DSML shell `<|DSML|tool_calls>` → `<|DSML|invoke name="...">` → `<|DSML|parameter name="...">`; DS2API also accepts legacy canonical XML `<tool_calls>` → `<invoke name="...">` → `<parameter name="...">`. DSML is normalized back to XML at the parser entry, so internal parsing remains XML-based, with stream-time anti-leak filtering.
|
||||
- `Admin API` separates static config from runtime policy: `/admin/config*` for configuration state, `/admin/settings*` for runtime behavior.
|
||||
|
||||
---
|
||||
@@ -81,7 +83,7 @@ Two header formats accepted:
|
||||
- Token is in `config.keys` → **Managed account mode**: DS2API auto-selects an account via rotation
|
||||
- Token is not in `config.keys` → **Direct token mode**: treated as a DeepSeek token directly
|
||||
|
||||
**Optional header**: `X-Ds2-Target-Account: <email_or_mobile>` — Pin a specific managed account.
|
||||
**Optional header**: `X-Ds2-Target-Account: <email_or_mobile>` — Pin a specific managed account; if the target account does not exist or the managed-account queue is exhausted, the request returns `429`, and current responses do not include `Retry-After`. If the account exists but login/refresh fails, the request returns the underlying `401` or upstream error.
|
||||
Gemini-compatible clients can also send `x-goog-api-key`, `?key=`, or `?api_key=` as the caller credential source.
|
||||
|
||||
### Admin Endpoints (`/admin/*`)
|
||||
@@ -109,6 +111,7 @@ Gemini-compatible clients can also send `x-goog-api-key`, `?key=`, or `?api_key=
|
||||
| GET | `/v1/responses/{response_id}` | Business | Query stored response (in-memory TTL) |
|
||||
| POST | `/v1/embeddings` | Business | OpenAI Embeddings API |
|
||||
| POST | `/v1/files` | Business | OpenAI Files upload (multipart/form-data) |
|
||||
| GET | `/v1/files/{file_id}` | Business | Retrieve uploaded file status |
|
||||
| GET | `/anthropic/v1/models` | None | Claude model list |
|
||||
| POST | `/anthropic/v1/messages` | Business | Claude messages |
|
||||
| POST | `/anthropic/v1/messages/count_tokens` | Business | Claude token counting |
|
||||
@@ -160,10 +163,13 @@ Gemini-compatible clients can also send `x-goog-api-key`, `?key=`, or `?api_key=
|
||||
| DELETE | `/admin/dev/captures` | Admin | Clear local packet-capture entries |
|
||||
| GET | `/admin/chat-history` | Admin | Read server-side conversation history |
|
||||
| DELETE | `/admin/chat-history` | Admin | Clear server-side conversation history |
|
||||
| GET | `/admin/chat-history/{id}` | Admin | Read one server-side conversation entry |
|
||||
| DELETE | `/admin/chat-history/{id}` | Admin | Delete one server-side conversation entry |
|
||||
| PUT | `/admin/chat-history/settings` | Admin | Update conversation history retention limit |
|
||||
| GET | `/admin/version` | Admin | Check current version and latest Release |
|
||||
|
||||
OpenAI `/v1/*` paths are canonical. For clients configured with the bare DS2API service URL, the same OpenAI handlers are also exposed through root shortcuts: `/models`, `/models/{id}`, `/chat/completions`, `/responses`, `/responses/{response_id}`, `/embeddings`, `/files`, and `/files/{file_id}`.
|
||||
|
||||
---
|
||||
|
||||
## Health Endpoints
|
||||
@@ -194,18 +200,16 @@ No auth required. Returns the currently supported DeepSeek native model list.
|
||||
{
|
||||
"object": "list",
|
||||
"data": [
|
||||
{"id": "deepseek-chat", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-reasoner", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-chat-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-reasoner-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-chat", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-reasoner", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-chat-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-reasoner-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-chat", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-reasoner", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-chat-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-reasoner-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []}
|
||||
{"id": "deepseek-v4-flash", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-flash-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-flash-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-flash-search-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro-search-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-vision", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-vision-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -221,12 +225,17 @@ For `chat` / `responses` / `embeddings`, DS2API follows a wide-input/strict-outp
|
||||
3. If still unmatched, fall back by known family heuristics (`o*`, `gpt-*`, `claude-*`, etc.).
|
||||
4. If still unmatched, return `invalid_request_error`.
|
||||
|
||||
Current built-in default aliases (excerpt):
|
||||
Built-in aliases come from `internal/config/models.go`; `config.model_aliases` can override or add mappings at runtime. Excerpt:
|
||||
|
||||
- OpenAI: `gpt-4o`, `gpt-4.1`, `gpt-4.1-mini`, `gpt-4.1-nano`, `gpt-5`, `gpt-5-mini`, `gpt-5-codex`
|
||||
- OpenAI reasoning: `o1`, `o1-mini`, `o3`, `o3-mini`
|
||||
- Claude: `claude-sonnet-4-5`, `claude-haiku-4-5`, `claude-opus-4-6` (plus compatibility aliases `claude-3-5-sonnet` / `claude-3-5-haiku` / `claude-3-opus`)
|
||||
- Gemini: `gemini-2.5-pro`, `gemini-2.5-flash`
|
||||
- OpenAI / Codex: `gpt-4o`, `gpt-4.1`, `gpt-5`, `gpt-5.5`, `gpt-5-codex`, `gpt-5.3-codex`, `codex-mini-latest`
|
||||
- OpenAI reasoning: `o1`, `o3`, `o3-deep-research`, `o4-mini`
|
||||
- Claude: `claude-opus-4-6`, `claude-sonnet-4-6`, `claude-haiku-4-5`, `claude-3-5-sonnet-latest`
|
||||
- Gemini: `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-pro-vision`
|
||||
- Other compatibility families: `llama-*`, `qwen-*`, `mistral-*`, and `command-*` fall back through family heuristics
|
||||
|
||||
Current vision support resolves only to `deepseek-v4-vision` and does not expose a separate `vision-search` variant.
|
||||
|
||||
Retired historical families such as `claude-1.*`, `claude-2.*`, `claude-instant-*`, and `gpt-3.5*` are explicitly rejected.
|
||||
|
||||
### `POST /v1/chat/completions`
|
||||
|
||||
@@ -241,7 +250,7 @@ Content-Type: application/json
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `model` | string | ✅ | DeepSeek native models + common aliases (`gpt-5`, `gpt-5-mini`, `gpt-5-codex`, `o3`, `claude-opus-4-6`, `gemini-2.5-pro`, `gemini-2.5-flash`, etc.) |
|
||||
| `model` | string | ✅ | DeepSeek native models + common aliases (`gpt-5.5`, `gpt-5.4-mini`, `gpt-5.3-codex`, `o3`, `claude-opus-4-6`, `gemini-2.5-pro`, `gemini-2.5-flash`, etc.) |
|
||||
| `messages` | array | ✅ | OpenAI-style messages |
|
||||
| `stream` | boolean | ❌ | Default `false` |
|
||||
| `tools` | array | ❌ | Function calling schema |
|
||||
@@ -254,14 +263,14 @@ Content-Type: application/json
|
||||
"id": "<chat_session_id>",
|
||||
"object": "chat.completion",
|
||||
"created": 1738400000,
|
||||
"model": "deepseek-reasoner",
|
||||
"model": "deepseek-v4-pro",
|
||||
"choices": [
|
||||
{
|
||||
"index": 0,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"content": "final response",
|
||||
"reasoning_content": "reasoning trace (reasoner models)"
|
||||
"reasoning_content": "reasoning trace (when thinking is enabled)"
|
||||
},
|
||||
"finish_reason": "stop"
|
||||
}
|
||||
@@ -296,10 +305,10 @@ data: [DONE]
|
||||
**Field notes**:
|
||||
|
||||
- First delta includes `role: assistant`
|
||||
- `deepseek-reasoner` / `deepseek-reasoner-search` models emit `delta.reasoning_content`
|
||||
- When thinking is enabled, the stream may emit `delta.reasoning_content`
|
||||
- Text emits `delta.content`
|
||||
- Last chunk includes `finish_reason` and `usage`
|
||||
- Token counting prefers pass-through from upstream DeepSeek SSE (`accumulated_token_usage` / `token_usage`), and only falls back to local estimation when upstream usage is absent
|
||||
- Token counting prefers pass-through from upstream DeepSeek SSE (`accumulated_token_usage` / `token_usage`), and only falls back to local estimation when upstream usage is absent. Failed/interrupted endings (for example `response.failed`) may not include `usage`
|
||||
|
||||
#### Tool Calls
|
||||
|
||||
@@ -336,7 +345,8 @@ When `tools` is present, DS2API performs anti-leak handling:
|
||||
|
||||
Additional notes:
|
||||
|
||||
- The parser currently follows XML/Markup-family tool payloads (`<tool_call>`, `<function_call>`, `<invoke>`, `tool_use`, antml variants). Standalone JSON `tool_calls` payloads are not treated as executable tool calls by default.
|
||||
- The parser treats DSML shell tool blocks (`<|DSML|tool_calls>` / `<|DSML|invoke name="...">` / `<|DSML|parameter name="...">`) and legacy canonical XML tool blocks (`<tool_calls>` / `<invoke name="...">` / `<parameter name="...">`) as executable tool calls. DSML is normalized back to XML at the parser entry; internal parsing remains XML-based. Legacy `<tools>`, `<tool_call>`, `<tool_name>`, `<param>`, `<function_call>`, `tool_use`, antml variants, and standalone JSON `tool_calls` payloads are treated as plain text.
|
||||
- If the final visible response text is empty but the reasoning stream contains an executable tool call, Chat / Responses emits a standard OpenAI `tool_calls` / `function_call` output during finalization. If thinking/reasoning was not enabled by the client, that reasoning text is used only for detection and is not exposed as visible text or `reasoning_content`.
|
||||
- `tool_calls` shown inside fenced markdown code blocks (for example, ```json ... ```) are treated as examples, not executable calls.
|
||||
|
||||
---
|
||||
@@ -414,7 +424,7 @@ Business auth required. Returns OpenAI-compatible embeddings shape.
|
||||
| `model` | string | ✅ | Supports native models + alias mapping |
|
||||
| `input` | string/array | ✅ | Supports string, string array, token array |
|
||||
|
||||
> Requires `embeddings.provider`. Current supported values: `mock` / `deterministic` / `builtin`. If missing/unsupported, returns standard error shape with HTTP 501.
|
||||
> Requires `embeddings.provider`. Current supported values: `mock` / `deterministic` / `builtin` (all three use the same local deterministic implementation). If missing/unsupported, returns standard error shape with HTTP 501.
|
||||
|
||||
### `POST /v1/files`
|
||||
|
||||
@@ -428,9 +438,13 @@ Business auth required. OpenAI Files-compatible upload endpoint; currently only
|
||||
Constraints and behavior:
|
||||
|
||||
- `Content-Type` must be `multipart/form-data` (otherwise `400`).
|
||||
- Total request size limit is `100 MiB` (over-limit returns `413`).
|
||||
- Total request size limit is **100 MiB** (over-limit returns `413`).
|
||||
- Success returns an OpenAI `file` object (`id/object/bytes/filename/purpose/status`, etc.) and includes `account_id` for source-account tracing.
|
||||
|
||||
### `GET /v1/files/{file_id}`
|
||||
|
||||
Business auth required. Retrieves the current DeepSeek upload status for a file and returns an OpenAI `file` object. Returns `404` when no matching file is found.
|
||||
|
||||
---
|
||||
|
||||
## Claude-Compatible API
|
||||
@@ -448,17 +462,17 @@ No auth required.
|
||||
{
|
||||
"object": "list",
|
||||
"data": [
|
||||
{"id": "claude-sonnet-4-5", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-sonnet-4-6", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-haiku-4-5", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-opus-4-6", "object": "model", "created": 1715635200, "owned_by": "anthropic"}
|
||||
],
|
||||
"first_id": "claude-opus-4-6",
|
||||
"last_id": "claude-instant-1.0",
|
||||
"last_id": "claude-3-haiku-20240307",
|
||||
"has_more": false
|
||||
}
|
||||
```
|
||||
|
||||
> Note: the example is partial; besides the current primary aliases, the real response also includes Claude 4.x snapshots plus historical 3.x / 2.x / 1.x IDs and common aliases.
|
||||
> Note: the example is partial; besides the current primary aliases, the real response also includes Claude 4.x snapshots plus historical 3.x IDs and common aliases.
|
||||
|
||||
### `POST /anthropic/v1/messages`
|
||||
|
||||
@@ -476,12 +490,19 @@ anthropic-version: 2023-06-01
|
||||
|
||||
| Field | Type | Required | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `model` | string | ✅ | For example `claude-sonnet-4-5` / `claude-opus-4-6` / `claude-haiku-4-5` (compatible with `claude-3-5-haiku-latest`), plus historical Claude model IDs |
|
||||
| `model` | string | ✅ | For example `claude-sonnet-4-6` / `claude-opus-4-6` / `claude-haiku-4-5` (compatible with `claude-3-5-haiku-latest`), plus historical Claude model IDs |
|
||||
| `messages` | array | ✅ | Claude-style messages |
|
||||
| `max_tokens` | number | ❌ | Auto-filled to `8192` when omitted; not strictly enforced by upstream bridge |
|
||||
| `stream` | boolean | ❌ | Default `false` |
|
||||
| `system` | string | ❌ | Optional system prompt |
|
||||
| `tools` | array | ❌ | Claude tool schema |
|
||||
| `thinking` | object | ❌ | Anthropic thinking config; translated into downstream reasoning control, and ignored by `-nothinking` models |
|
||||
| `temperature` | number | ❌ | Passed through to the downstream bridge; if `temperature` and `top_p` are both present, `temperature` wins |
|
||||
| `top_p` | number | ❌ | Passed through when `temperature` is absent |
|
||||
| `stop_sequences` | array | ❌ | Passed through as downstream stop sequences |
|
||||
| `tool_choice` | string/object | ❌ | Supports `auto` / `none` / `required` / `{"type":"function","name":"..."}` and is translated to downstream tool choice |
|
||||
|
||||
> Note: `thinking`, `temperature`, `top_p`, `stop_sequences`, and `tool_choice` are translated through the compatibility bridge. Final behavior still depends on the selected model and upstream support. When both `temperature` and `top_p` are present, `temperature` takes precedence.
|
||||
|
||||
#### Non-Stream Response
|
||||
|
||||
@@ -490,7 +511,7 @@ anthropic-version: 2023-06-01
|
||||
"id": "msg_1738400000000000000",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-sonnet-4-5",
|
||||
"model": "claude-sonnet-4-6",
|
||||
"content": [
|
||||
{"type": "text", "text": "response"}
|
||||
],
|
||||
@@ -534,7 +555,7 @@ data: {"type":"message_stop"}
|
||||
|
||||
**Notes**:
|
||||
|
||||
- Models whose names contain `opus` / `reasoner` / `slow` stream `thinking_delta`
|
||||
- Models that support thinking emit `thinking` blocks / `thinking_delta` by default; explicit thinking disablement or `-nothinking` models suppress them
|
||||
- `signature_delta` is not emitted (DeepSeek does not provide verifiable thinking signatures)
|
||||
- In `tools` mode, the stream avoids leaking raw tool JSON and does not force `input_json_delta`
|
||||
|
||||
@@ -544,7 +565,7 @@ data: {"type":"message_stop"}
|
||||
|
||||
```json
|
||||
{
|
||||
"model": "claude-sonnet-4-5",
|
||||
"model": "claude-sonnet-4-6",
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello"}
|
||||
]
|
||||
@@ -580,6 +601,7 @@ Request body accepts Gemini-style `contents` / `tools`. Model names can use alia
|
||||
Response uses Gemini-compatible fields, including:
|
||||
|
||||
- `candidates[].content.parts[].text`
|
||||
- `candidates[].content.parts[].thought=true` for thinking output
|
||||
- `candidates[].content.parts[].functionCall` (when tool call is produced)
|
||||
- `usageMetadata` (`promptTokenCount` / `candidatesTokenCount` / `totalTokenCount`)
|
||||
|
||||
@@ -588,6 +610,7 @@ Response uses Gemini-compatible fields, including:
|
||||
Returns SSE (`text/event-stream`), each chunk as `data: <json>`:
|
||||
|
||||
- regular text: incremental text chunks
|
||||
- thinking: incremental chunks with `parts[].thought=true`
|
||||
- `tools` mode: buffered and emitted as `functionCall` at finalize phase
|
||||
- final chunk: includes `finishReason: "STOP"` and `usageMetadata`
|
||||
- Token counting prefers pass-through from upstream DeepSeek SSE (`accumulated_token_usage` / `token_usage`), and only falls back to local estimation when upstream usage is absent
|
||||
@@ -672,16 +695,16 @@ Returns sanitized config, including both `keys` and `api_keys`.
|
||||
"token_preview": "abcde..."
|
||||
}
|
||||
],
|
||||
"claude_mapping": {
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner"
|
||||
"model_aliases": {
|
||||
"claude-sonnet-4-6": "deepseek-v4-flash",
|
||||
"claude-opus-4-6": "deepseek-v4-pro"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### `POST /admin/config`
|
||||
|
||||
Only updates `keys`, `api_keys`, `accounts`, and `claude_mapping`.
|
||||
Only updates `keys`, `api_keys`, `accounts`, and `model_aliases`.
|
||||
If both `api_keys` and `keys` are sent, the structured `api_keys` entries win so `name` / `remark` metadata is preserved; `keys` remains a legacy fallback.
|
||||
|
||||
**Request**:
|
||||
@@ -696,9 +719,9 @@ If both `api_keys` and `keys` are sent, the structured `api_keys` entries win so
|
||||
"accounts": [
|
||||
{"email": "user@example.com", "password": "pwd", "token": ""}
|
||||
],
|
||||
"claude_mapping": {
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner"
|
||||
"model_aliases": {
|
||||
"claude-sonnet-4-6": "deepseek-v4-flash",
|
||||
"claude-opus-4-6": "deepseek-v4-pro"
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -710,10 +733,10 @@ Reads runtime settings and status, including:
|
||||
- `success`
|
||||
- `admin` (`has_password_hash`, `jwt_expire_hours`, `jwt_valid_after_unix`, `default_password_warning`)
|
||||
- `runtime` (`account_max_inflight`, `account_max_queue`, `global_max_inflight`, `token_refresh_interval_hours`)
|
||||
- `compat` (`wide_input_strict_output`, `strip_reference_markers`)
|
||||
- `responses` / `embeddings`
|
||||
- `auto_delete` (`mode`: `none` / `single` / `all`; legacy `sessions=true` is still treated as `all`)
|
||||
- `claude_mapping` / `model_aliases`
|
||||
- `current_input_file` (`enabled` defaults to `true`, plus `min_chars`)
|
||||
- `model_aliases`
|
||||
- `env_backed`, `needs_vercel_sync`
|
||||
- `toolcall` policy is fixed to `feature_match + high` and is no longer returned or editable via settings
|
||||
|
||||
@@ -723,11 +746,10 @@ Hot-updates runtime settings. Supported fields:
|
||||
|
||||
- `admin.jwt_expire_hours`
|
||||
- `runtime.account_max_inflight` / `runtime.account_max_queue` / `runtime.global_max_inflight` / `runtime.token_refresh_interval_hours`
|
||||
- `compat.wide_input_strict_output` / `compat.strip_reference_markers`
|
||||
- `responses.store_ttl_seconds`
|
||||
- `embeddings.provider`
|
||||
- `auto_delete.mode`
|
||||
- `claude_mapping`
|
||||
- `current_input_file.enabled` / `current_input_file.min_chars`
|
||||
- `model_aliases`
|
||||
- `toolcall` policy is fixed and is no longer writable through settings
|
||||
|
||||
@@ -752,9 +774,9 @@ Imports full config with:
|
||||
|
||||
The request can send config directly, or wrapped as `{"config": {...}, "mode":"merge"}`.
|
||||
Query params `?mode=merge` / `?mode=replace` are also supported.
|
||||
Import accepts `keys`, `api_keys`, `accounts`, `claude_mapping` / `claude_model_mapping`, `model_aliases`, `admin`, `runtime`, `responses`, `embeddings`, and `auto_delete`; legacy `toolcall` fields are ignored.
|
||||
`replace` mode replaces the full config shape while preserving Vercel sync metadata. `merge` mode merges `keys`, `api_keys`, `accounts`, and `model_aliases`, and overwrites non-empty fields under `admin`, `runtime`, `responses`, and `embeddings`. Manage `auto_delete` and `current_input_file` via `/admin/settings` or the config file; legacy `compat` and `toolcall` fields are ignored.
|
||||
|
||||
> `compat` fields are managed via `/admin/settings` or the config file; this import endpoint does not update `compat`.
|
||||
> Note: `merge` mode does not update `auto_delete` or `current_input_file`.
|
||||
|
||||
### `GET /admin/config/export`
|
||||
|
||||
@@ -903,7 +925,7 @@ Updates proxy binding for a specific account.
|
||||
| Field | Required | Notes |
|
||||
| --- | --- | --- |
|
||||
| `identifier` | ✅ | email / mobile / token-only synthetic id |
|
||||
| `model` | ❌ | default `deepseek-chat` |
|
||||
| `model` | ❌ | default `deepseek-v4-flash` |
|
||||
| `message` | ❌ | if empty, only session creation is tested |
|
||||
|
||||
**Response**:
|
||||
@@ -914,14 +936,17 @@ Updates proxy binding for a specific account.
|
||||
"success": true,
|
||||
"response_time": 1240,
|
||||
"message": "API test successful (session creation only)",
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"session_count": 0,
|
||||
"config_writable": true
|
||||
"config_writable": true,
|
||||
"config_warning": ""
|
||||
}
|
||||
```
|
||||
|
||||
If a `message` is provided, `thinking` may also be included when the upstream response carries reasoning text.
|
||||
|
||||
When the configured file path is not writable (for example, read-only `/app/config.json` inside some containers), login/session testing still proceeds; `config_warning` is returned to indicate token persistence failed and the token is memory-only until restart.
|
||||
|
||||
### `POST /admin/accounts/test-all`
|
||||
|
||||
Optional request field: `model`.
|
||||
@@ -985,7 +1010,7 @@ Test API availability through the service itself.
|
||||
|
||||
| Field | Required | Default |
|
||||
| --- | --- | --- |
|
||||
| `model` | ❌ | `deepseek-chat` |
|
||||
| `model` | ❌ | `deepseek-v4-flash` |
|
||||
| `message` | ❌ | `你好` |
|
||||
| `api_key` | ❌ | First key in config |
|
||||
|
||||
@@ -1009,7 +1034,7 @@ Common request fields:
|
||||
| --- | --- | --- | --- |
|
||||
| `message` | No | `你好` | Convenience single-turn user message |
|
||||
| `messages` | No | Auto-derived from `message` | OpenAI-style message array |
|
||||
| `model` | No | `deepseek-chat` | Target model |
|
||||
| `model` | No | `deepseek-v4-flash` | Target model |
|
||||
| `stream` | No | `true` | Recommended to keep streaming enabled so raw SSE is recorded |
|
||||
| `api_key` | No | First configured key | Business API key to use |
|
||||
| `sample_id` | No | Auto-generated | Sample directory name |
|
||||
@@ -1205,7 +1230,7 @@ Clients should handle HTTP status code plus `error` / `detail` fields.
|
||||
| Code | Meaning |
|
||||
| --- | --- |
|
||||
| `401` | Authentication failed (invalid key/token, or expired admin JWT) |
|
||||
| `429` | Too many requests (exceeded inflight + queue capacity) |
|
||||
| `429` | Too many requests (exceeded inflight + queue capacity; current responses do not include `Retry-After`) |
|
||||
| `503` | Model unavailable or upstream error |
|
||||
|
||||
---
|
||||
@@ -1219,7 +1244,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"messages": [{"role": "user", "content": "Hello"}],
|
||||
"stream": false
|
||||
}'
|
||||
@@ -1232,7 +1257,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-reasoner",
|
||||
"model": "deepseek-v4-pro",
|
||||
"messages": [{"role": "user", "content": "Explain quantum entanglement"}],
|
||||
"stream": true
|
||||
}'
|
||||
@@ -1270,7 +1295,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat-search",
|
||||
"model": "deepseek-v4-flash-search",
|
||||
"messages": [{"role": "user", "content": "Latest news today"}],
|
||||
"stream": true
|
||||
}'
|
||||
@@ -1283,7 +1308,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"messages": [{"role": "user", "content": "What is the weather in Beijing?"}],
|
||||
"tools": [
|
||||
{
|
||||
@@ -1344,7 +1369,7 @@ curl http://localhost:5001/anthropic/v1/messages \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "anthropic-version: 2023-06-01" \
|
||||
-d '{
|
||||
"model": "claude-sonnet-4-5",
|
||||
"model": "claude-sonnet-4-6",
|
||||
"max_tokens": 1024,
|
||||
"messages": [{"role": "user", "content": "Hello"}]
|
||||
}'
|
||||
@@ -1381,7 +1406,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "X-Ds2-Target-Account: user@example.com" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"messages": [{"role": "user", "content": "Hello"}]
|
||||
}'
|
||||
```
|
||||
|
||||
165
API.md
165
API.md
@@ -31,14 +31,18 @@
|
||||
| Base URL | `http://localhost:5001` 或你的部署域名 |
|
||||
| 默认 Content-Type | `application/json` |
|
||||
| 健康检查 | `GET /healthz`、`GET /readyz` |
|
||||
| CORS | 已启用(`Access-Control-Allow-Origin: *`,允许 `Content-Type`, `Authorization`, `X-API-Key`, `X-Ds2-Target-Account`, `X-Ds2-Source`, `X-Vercel-Protection-Bypass`) |
|
||||
| CORS | 已启用(统一覆盖 `/v1/*`、`/anthropic/*`、`/v1beta/models/*`、`/admin/*`;浏览器有 `Origin` 时回显该 Origin,否则为 `*`;默认允许 `Content-Type`, `Authorization`, `X-API-Key`, `X-Ds2-Target-Account`, `X-Ds2-Source`, `X-Vercel-Protection-Bypass`, `X-Goog-Api-Key`, `Anthropic-Version`, `Anthropic-Beta`,并会放行预检里声明的第三方请求头,如 `x-stainless-*`;Vercel 上 `/v1/chat/completions` 的 Node Runtime 也对齐相同行为;内部专用头 `X-Ds2-Internal-Token` 仍被拦截) |
|
||||
|
||||
- 所有 JSON 请求体都必须是合法 UTF-8;非法字节序列会在入站阶段被拒绝为 `400 invalid json`。
|
||||
|
||||
### 3.0 接口适配层说明
|
||||
|
||||
- OpenAI / Claude / Gemini 三套协议已统一挂在同一 `chi` 路由树上,由 `internal/server/router.go` 负责装配。
|
||||
- 适配器层职责收敛为:**请求归一化 → DeepSeek 调用 → 协议形态渲染**,减少历史版本中“同能力多处实现”的分叉。
|
||||
- Tool Calling 的解析策略在 Go 与 Node Runtime 间保持一致:当前以 XML/Markup 家族解析为主(含 `<tool_call>` / `<function_call>` / `<invoke>` / `tool_use` / antml 变体),并在流式场景执行防泄漏筛分。
|
||||
- Tool Calling 的解析策略在 Go 与 Node Runtime 间保持一致:推荐模型输出 DSML 外壳 `<|DSML|tool_calls>` → `<|DSML|invoke name="...">` → `<|DSML|parameter name="...">`;兼容层也接受 DSML wrapper 别名 `<dsml|tool_calls>`、`<|tool_calls>`、常见 DSML 分隔符漏写形态(如 `<|DSML tool_calls>`)、`DSML` 与工具标签名黏连的常见 typo(如 `<DSMLtool_calls>`),以及旧式 canonical XML `<tool_calls>` → `<invoke name="...">` → `<parameter name="...">`。实现上采用窄容错结构扫描:只有 `tool_calls` wrapper 或可修复的缺失 opening wrapper 会进入工具路径,裸 `<invoke>` 不计为已支持语法;流式场景继续执行防泄漏筛分。若参数体本身是合法 JSON 字面量(如 `123`、`true`、`null`、数组或对象),会按结构化值输出,不再一律当作字符串;若 CDATA 偶发漏闭合,则会在最终 parse / flush 恢复阶段做窄修复,尽量保住已完整包裹的外层工具调用。
|
||||
- `Admin API` 将配置与运行时策略分开:`/admin/config*` 管静态配置,`/admin/settings*` 管运行时行为。
|
||||
- 当上游返回 thinking-only 响应(模型输出了推理链但无可见文本)时,非流式补全会自动重试一次:以多轮对话 follow-up 方式追加 prompt 后缀 `"Previous reply had no visible output. Please regenerate the visible final answer or tool call now."` 并设置 `parent_message_id` 在同一 DeepSeek session 内让模型重新输出;重试最大 1 次。
|
||||
- 引用标记剥离(strip reference markers)当前为固定开启的运行时行为,所有协议适配层统一生效。
|
||||
|
||||
---
|
||||
|
||||
@@ -81,7 +85,7 @@ Vercel 一键部署可先只填 `DS2API_ADMIN_KEY`,部署后在 `/admin` 导
|
||||
- token 在 `config.keys` 中 → **托管账号模式**,自动轮询选择账号
|
||||
- token 不在 `config.keys` 中 → **直通 token 模式**,直接作为 DeepSeek token 使用
|
||||
|
||||
**可选请求头**:`X-Ds2-Target-Account: <email_or_mobile>` — 指定使用某个托管账号。
|
||||
**可选请求头**:`X-Ds2-Target-Account: <email_or_mobile>` — 指定使用某个托管账号;如果目标账号不存在,或管理账号队列已耗尽,相关业务请求会返回 `429`,当前不会附带 `Retry-After` 头。若账号存在但登录/刷新失败,则返回对应的 `401` 或上游错误。
|
||||
Gemini 兼容客户端还可以使用 `x-goog-api-key`、`?key=` 或 `?api_key=` 作为凭据来源。
|
||||
|
||||
### Admin 接口(`/admin/*`)
|
||||
@@ -109,6 +113,7 @@ Gemini 兼容客户端还可以使用 `x-goog-api-key`、`?key=` 或 `?api_key=`
|
||||
| GET | `/v1/responses/{response_id}` | 业务 | 查询已生成 response(内存 TTL) |
|
||||
| POST | `/v1/embeddings` | 业务 | OpenAI Embeddings 接口 |
|
||||
| POST | `/v1/files` | 业务 | OpenAI Files 上传(multipart/form-data) |
|
||||
| GET | `/v1/files/{file_id}` | 业务 | 查询已上传文件状态 |
|
||||
| GET | `/anthropic/v1/models` | 无 | Claude 模型列表 |
|
||||
| POST | `/anthropic/v1/messages` | 业务 | Claude 消息接口 |
|
||||
| POST | `/anthropic/v1/messages/count_tokens` | 业务 | Claude token 计数 |
|
||||
@@ -160,10 +165,13 @@ Gemini 兼容客户端还可以使用 `x-goog-api-key`、`?key=` 或 `?api_key=`
|
||||
| DELETE | `/admin/dev/captures` | Admin | 清空本地抓包记录 |
|
||||
| GET | `/admin/chat-history` | Admin | 查看服务器端对话记录 |
|
||||
| DELETE | `/admin/chat-history` | Admin | 清空服务器端对话记录 |
|
||||
| GET | `/admin/chat-history/{id}` | Admin | 查看单条服务器端对话记录 |
|
||||
| DELETE | `/admin/chat-history/{id}` | Admin | 删除单条服务器端对话记录 |
|
||||
| PUT | `/admin/chat-history/settings` | Admin | 更新对话记录保留条数 |
|
||||
| GET | `/admin/version` | Admin | 查询当前版本与最新 Release |
|
||||
|
||||
OpenAI `/v1/*` 仍是规范路径。对于只配置 DS2API 根地址的客户端,同一套 OpenAI handler 也通过根路径快捷路由暴露:`/models`、`/models/{id}`、`/chat/completions`、`/responses`、`/responses/{response_id}`、`/embeddings`、`/files`、`/files/{file_id}`。
|
||||
|
||||
---
|
||||
|
||||
## 健康检查
|
||||
@@ -194,23 +202,21 @@ Gemini 兼容客户端还可以使用 `x-goog-api-key`、`?key=` 或 `?api_key=`
|
||||
{
|
||||
"object": "list",
|
||||
"data": [
|
||||
{"id": "deepseek-chat", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-reasoner", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-chat-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-reasoner-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-chat", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-reasoner", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-chat-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-expert-reasoner-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-chat", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-reasoner", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-chat-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-vision-reasoner-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []}
|
||||
{"id": "deepseek-v4-flash", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-flash-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-flash-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-flash-search-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro-search", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-pro-search-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-vision", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []},
|
||||
{"id": "deepseek-v4-vision-nothinking", "object": "model", "created": 1677610602, "owned_by": "deepseek", "permission": []}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
> 说明:`/v1/models` 返回的是规范化后的 DeepSeek 原生模型 ID;常见 alias 仅用于请求入参解析,不会在该接口中单独展开返回。
|
||||
> 说明:`/v1/models` 返回的是规范化后的 DeepSeek 原生模型 ID;常见 alias 仅用于请求入参解析,不会在该接口中单独展开返回。带 `-nothinking` 后缀的模型表示无论请求里是否显式开启 thinking / reasoning,都会强制关闭思考输出。
|
||||
|
||||
### 模型 alias 解析策略
|
||||
|
||||
@@ -218,15 +224,22 @@ Gemini 兼容客户端还可以使用 `x-goog-api-key`、`?key=` 或 `?api_key=`
|
||||
|
||||
1. 先匹配 DeepSeek 原生模型。
|
||||
2. 再匹配 `model_aliases` 精确映射。
|
||||
3. 未命中时按模型家族规则回退(如 `o*`、`gpt-*`、`claude-*`)。
|
||||
4. 仍未命中则返回 `invalid_request_error`。
|
||||
3. 如果请求名以 `-nothinking` 结尾,则在最终解析出的规范模型上追加对应的无思考变体。
|
||||
4. 未命中时按模型家族规则回退(如 `o*`、`gpt-*`、`claude-*`)。
|
||||
5. 仍未命中则返回 `invalid_request_error`。
|
||||
|
||||
当前内置默认 alias(节选):
|
||||
当前内置默认 alias 来自 `internal/config/models.go`,`config.model_aliases` 会在运行时覆盖或补充同名映射。节选:
|
||||
|
||||
- OpenAI:`gpt-4o`、`gpt-4.1`、`gpt-4.1-mini`、`gpt-4.1-nano`、`gpt-5`、`gpt-5-mini`、`gpt-5-codex`
|
||||
- OpenAI Reasoning:`o1`、`o1-mini`、`o3`、`o3-mini`
|
||||
- Claude:`claude-sonnet-4-5`、`claude-haiku-4-5`、`claude-opus-4-6`(及 `claude-3-5-sonnet` / `claude-3-5-haiku` / `claude-3-opus` 兼容别名)
|
||||
- Gemini:`gemini-2.5-pro`、`gemini-2.5-flash`
|
||||
- OpenAI / Codex:`gpt-4o`、`gpt-4.1`、`gpt-5`、`gpt-5.5`、`gpt-5-codex`、`gpt-5.3-codex`、`codex-mini-latest`
|
||||
- OpenAI reasoning:`o1`、`o3`、`o3-deep-research`、`o4-mini`
|
||||
- Claude:`claude-opus-4-6`、`claude-sonnet-4-6`、`claude-haiku-4-5`、`claude-3-5-sonnet-latest`
|
||||
- Gemini:`gemini-2.5-pro`、`gemini-2.5-flash`、`gemini-pro-vision`
|
||||
- 其他兼容族:`llama-*`、`qwen-*`、`mistral-*`、`command-*` 会按家族启发式回退
|
||||
|
||||
上述 alias 若在请求名后追加 `-nothinking` 后缀,也会映射到对应的强制关闭 thinking 版本。
|
||||
当前视觉能力仅对应 `deepseek-v4-vision` / `deepseek-v4-vision-nothinking`,不会解析出独立的 `vision-search` 变体。
|
||||
|
||||
退役历史模型(如 `claude-1.*`、`claude-2.*`、`claude-instant-*`、`gpt-3.5*`)会被显式拒绝。
|
||||
|
||||
### `POST /v1/chat/completions`
|
||||
|
||||
@@ -241,7 +254,7 @@ Content-Type: application/json
|
||||
|
||||
| 字段 | 类型 | 必填 | 说明 |
|
||||
| --- | --- | --- | --- |
|
||||
| `model` | string | ✅ | 支持 DeepSeek 原生模型 + 常见 alias(如 `gpt-5`、`gpt-5-mini`、`gpt-5-codex`、`o3`、`claude-opus-4-6`、`gemini-2.5-pro`、`gemini-2.5-flash` 等) |
|
||||
| `model` | string | ✅ | 支持 DeepSeek 原生模型 + 常见 alias(如 `gpt-5.5`、`gpt-5.4-mini`、`gpt-5.3-codex`、`o3`、`claude-opus-4-6`、`claude-sonnet-4-6`、`gemini-2.5-pro`、`gemini-2.5-flash` 等);若模型名带 `-nothinking` 后缀,则强制关闭 thinking / reasoning |
|
||||
| `messages` | array | ✅ | OpenAI 风格消息数组 |
|
||||
| `stream` | boolean | ❌ | 默认 `false` |
|
||||
| `tools` | array | ❌ | Function Calling 定义 |
|
||||
@@ -254,14 +267,14 @@ Content-Type: application/json
|
||||
"id": "<chat_session_id>",
|
||||
"object": "chat.completion",
|
||||
"created": 1738400000,
|
||||
"model": "deepseek-reasoner",
|
||||
"model": "deepseek-v4-pro",
|
||||
"choices": [
|
||||
{
|
||||
"index": 0,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"content": "最终回复",
|
||||
"reasoning_content": "思考内容(reasoner 模型)"
|
||||
"reasoning_content": "思考内容(开启 thinking 时)"
|
||||
},
|
||||
"finish_reason": "stop"
|
||||
}
|
||||
@@ -296,10 +309,10 @@ data: [DONE]
|
||||
**字段说明**:
|
||||
|
||||
- 首个 delta 包含 `role: assistant`
|
||||
- `deepseek-reasoner` / `deepseek-reasoner-search` 模型输出 `delta.reasoning_content`
|
||||
- 开启 thinking 时会输出 `delta.reasoning_content`
|
||||
- 普通文本输出 `delta.content`
|
||||
- 最后一段包含 `finish_reason` 和 `usage`
|
||||
- token 计数优先透传上游 DeepSeek SSE(如 `accumulated_token_usage` / `token_usage`);仅在上游缺失时回退本地估算
|
||||
- token 计数优先透传上游 DeepSeek SSE(如 `accumulated_token_usage` / `token_usage`);仅在上游缺失时回退本地估算。失败/中断型结束(例如 `response.failed`)可能不会携带 `usage`
|
||||
|
||||
#### Tool Calls
|
||||
|
||||
@@ -337,7 +350,8 @@ data: [DONE]
|
||||
补充说明:
|
||||
|
||||
- **非代码块上下文**下,工具负载即使与普通文本混合,也会按特征识别并产出可执行 tool call(前后普通文本仍可透传)。
|
||||
- 解析器当前走 XML/Markup 家族(包含 `<tool_call>`、`<function_call>`、`<invoke>`、`tool_use`、antml 风格);纯 JSON `tool_calls` 片段默认不会直接作为可执行调用解析。
|
||||
- 解析器当前把 DSML 外壳(`<|DSML|tool_calls>` / `<|DSML|invoke name="...">` / `<|DSML|parameter name="...">`)、DSML wrapper 别名(`<dsml|tool_calls>`、`<|tool_calls>`)、常见 DSML 分隔符漏写形态(如 `<|DSML tool_calls>` / `<|DSML invoke>` / `<|DSML parameter>`)、`DSML` 与工具标签名黏连的常见 typo(如 `<DSMLtool_calls>` / `<DSMLinvoke>` / `<DSMLparameter>`)和旧式 canonical XML 工具块(`<tool_calls>` / `<invoke name="...">` / `<parameter name="...">`)作为可执行调用解析;DSML 会先归一化回 XML,内部仍以 XML 解析语义为准。旧式 `<tools>`、`<tool_call>`、`<tool_name>`、`<param>`、`<function_call>`、`tool_use`、antml 风格与纯 JSON `tool_calls` 片段默认都会按普通文本处理。
|
||||
- 当最终可见正文为空但思维链里包含可执行工具调用时,Chat / Responses 会在收尾阶段补发标准 OpenAI `tool_calls` / `function_call` 输出;如果客户端未开启 thinking / reasoning,该思维链只用于检测,不会作为可见正文或 `reasoning_content` 暴露。
|
||||
- Markdown fenced code block(例如 ```json ... ```)中的 `tool_calls` 仅视为示例文本,不会被执行。
|
||||
|
||||
---
|
||||
@@ -415,7 +429,7 @@ data: [DONE]
|
||||
| `model` | string | ✅ | 支持原生模型 + alias 自动映射 |
|
||||
| `input` | string/array | ✅ | 支持字符串、字符串数组、token 数组 |
|
||||
|
||||
> 需配置 `embeddings.provider`。当前支持:`mock` / `deterministic` / `builtin`。未配置或不支持时返回标准错误结构(HTTP 501)。
|
||||
> 需配置 `embeddings.provider`。当前支持:`mock` / `deterministic` / `builtin`(三者都走同一套本地确定性实现)。未配置或不支持时返回标准错误结构(HTTP 501)。
|
||||
|
||||
### `POST /v1/files`
|
||||
|
||||
@@ -429,9 +443,13 @@ data: [DONE]
|
||||
约束与行为:
|
||||
|
||||
- 请求必须为 `multipart/form-data`,否则返回 `400`。
|
||||
- 请求体总大小上限 `100 MiB`(超限返回 `413`)。
|
||||
- 请求体总大小上限 **100 MiB**(超限返回 `413`)。
|
||||
- 成功返回 OpenAI `file` 对象(`id/object/bytes/filename/purpose/status` 等字段),并附带 `account_id` 便于定位来源账号。
|
||||
|
||||
### `GET /v1/files/{file_id}`
|
||||
|
||||
需要业务鉴权。查询 DeepSeek 上传文件的当前状态,并返回 OpenAI `file` 对象;未找到匹配文件时返回 `404`。
|
||||
|
||||
---
|
||||
|
||||
## Claude 兼容接口
|
||||
@@ -449,17 +467,20 @@ data: [DONE]
|
||||
{
|
||||
"object": "list",
|
||||
"data": [
|
||||
{"id": "claude-sonnet-4-5", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-sonnet-4-6", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-sonnet-4-6-nothinking", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-haiku-4-5", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-opus-4-6", "object": "model", "created": 1715635200, "owned_by": "anthropic"}
|
||||
{"id": "claude-haiku-4-5-nothinking", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-opus-4-6", "object": "model", "created": 1715635200, "owned_by": "anthropic"},
|
||||
{"id": "claude-opus-4-6-nothinking", "object": "model", "created": 1715635200, "owned_by": "anthropic"}
|
||||
],
|
||||
"first_id": "claude-opus-4-6",
|
||||
"last_id": "claude-instant-1.0",
|
||||
"last_id": "claude-3-haiku-20240307-nothinking",
|
||||
"has_more": false
|
||||
}
|
||||
```
|
||||
|
||||
> 说明:示例仅展示部分模型;实际返回除当前主别名外,还包含 Claude 4.x snapshots,以及 3.x / 2.x / 1.x 历史模型 ID 与常见别名。
|
||||
> 说明:示例仅展示部分模型;实际返回除当前主别名外,还包含 Claude 4.x snapshots、3.x 历史模型 ID 与常见别名,并为这些可映射模型额外提供 `-nothinking` 变体。
|
||||
|
||||
### `POST /anthropic/v1/messages`
|
||||
|
||||
@@ -477,12 +498,19 @@ anthropic-version: 2023-06-01
|
||||
|
||||
| 字段 | 类型 | 必填 | 说明 |
|
||||
| --- | --- | --- | --- |
|
||||
| `model` | string | ✅ | 例如 `claude-sonnet-4-5` / `claude-opus-4-6` / `claude-haiku-4-5`(兼容 `claude-3-5-haiku-latest`),并支持历史 Claude 模型 ID |
|
||||
| `model` | string | ✅ | 例如 `claude-sonnet-4-6` / `claude-opus-4-6` / `claude-haiku-4-5`(兼容 `claude-sonnet-4-5`、`claude-3-5-haiku-latest`),并支持历史 Claude 模型 ID;若模型名带 `-nothinking` 后缀,则强制关闭 thinking / reasoning |
|
||||
| `messages` | array | ✅ | Claude 风格消息数组 |
|
||||
| `max_tokens` | number | ❌ | 缺省自动补 `8192`;当前实现不会硬性截断上游输出 |
|
||||
| `stream` | boolean | ❌ | 默认 `false` |
|
||||
| `system` | string | ❌ | 可选系统提示 |
|
||||
| `tools` | array | ❌ | Claude tool 定义 |
|
||||
| `thinking` | object | ❌ | Anthropic thinking 配置;会转译为下游 reasoning 控制,`-nothinking` 模型会忽略 |
|
||||
| `temperature` | number | ❌ | 透传到下游;若同时提供 `top_p`,以 `temperature` 为准 |
|
||||
| `top_p` | number | ❌ | 当未提供 `temperature` 时透传到下游 |
|
||||
| `stop_sequences` | array | ❌ | 透传到下游停用序列 |
|
||||
| `tool_choice` | string/object | ❌ | 支持 `auto` / `none` / `required` / `{"type":"function","name":"..."}`,并会转译为下游工具选择 |
|
||||
|
||||
> 说明:上述 `thinking`、`temperature`、`top_p`、`stop_sequences`、`tool_choice` 都会走兼容层转译;最终是否生效仍取决于当前模型和上游能力。`temperature` 与 `top_p` 同时存在时,`temperature` 优先。
|
||||
|
||||
#### 非流式响应
|
||||
|
||||
@@ -491,7 +519,7 @@ anthropic-version: 2023-06-01
|
||||
"id": "msg_1738400000000000000",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-sonnet-4-5",
|
||||
"model": "claude-sonnet-4-6",
|
||||
"content": [
|
||||
{"type": "text", "text": "回复内容"}
|
||||
],
|
||||
@@ -535,7 +563,8 @@ data: {"type":"message_stop"}
|
||||
|
||||
**说明**:
|
||||
|
||||
- 名称中包含 `opus` / `reasoner` / `slow` 的模型会输出 `thinking_delta`
|
||||
- 默认支持 thinking 的模型会输出 `thinking` block / `thinking_delta`;请求显式关闭 thinking 或使用 `-nothinking` 模型时不会输出
|
||||
- 带 `-nothinking` 后缀的模型会强制关闭 thinking,即使请求显式传了 `thinking` / `reasoning` / `reasoning_effort` 也不会输出 `thinking_delta`
|
||||
- 不会输出 `signature_delta`(上游 DeepSeek 未提供可验证签名)
|
||||
- `tools` 场景优先避免泄露原始工具 JSON,不强制发送 `input_json_delta`
|
||||
|
||||
@@ -545,7 +574,7 @@ data: {"type":"message_stop"}
|
||||
|
||||
```json
|
||||
{
|
||||
"model": "claude-sonnet-4-5",
|
||||
"model": "claude-sonnet-4-6",
|
||||
"messages": [
|
||||
{"role": "user", "content": "你好"}
|
||||
]
|
||||
@@ -576,11 +605,12 @@ data: {"type":"message_stop"}
|
||||
|
||||
### `POST /v1beta/models/{model}:generateContent`
|
||||
|
||||
请求体兼容 Gemini `contents` / `tools` 字段,模型名可用 alias 自动映射到 DeepSeek 模型。
|
||||
请求体兼容 Gemini `contents` / `tools` 字段,模型名可用 alias 自动映射到 DeepSeek 模型;若路径中的模型名带 `-nothinking` 后缀,则最终会映射到对应的无思考模型。
|
||||
|
||||
响应为 Gemini 兼容结构,核心字段包括:
|
||||
|
||||
- `candidates[].content.parts[].text`
|
||||
- `candidates[].content.parts[].thought=true`(thinking 输出)
|
||||
- `candidates[].content.parts[].functionCall`(工具调用时)
|
||||
- `usageMetadata`(`promptTokenCount` / `candidatesTokenCount` / `totalTokenCount`)
|
||||
|
||||
@@ -589,6 +619,7 @@ data: {"type":"message_stop"}
|
||||
返回 SSE(`text/event-stream`),每个 chunk 为一条 `data: <json>`:
|
||||
|
||||
- 常规文本:持续返回增量文本 chunk
|
||||
- thinking:持续返回 `parts[].thought=true` 的增量 chunk
|
||||
- `tools` 场景:会缓冲并在结束时输出 `functionCall` 结构
|
||||
- 结束 chunk:包含 `finishReason: "STOP"` 与 `usageMetadata`
|
||||
- token 计数优先透传上游 DeepSeek SSE(如 `accumulated_token_usage` / `token_usage`);仅在上游缺失时回退本地估算
|
||||
@@ -673,16 +704,16 @@ data: {"type":"message_stop"}
|
||||
"token_preview": "abcde..."
|
||||
}
|
||||
],
|
||||
"claude_mapping": {
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner"
|
||||
"model_aliases": {
|
||||
"claude-sonnet-4-6": "deepseek-v4-flash",
|
||||
"claude-opus-4-6": "deepseek-v4-pro"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### `POST /admin/config`
|
||||
|
||||
只更新 `keys`、`api_keys`、`accounts`、`claude_mapping`。
|
||||
只更新 `keys`、`api_keys`、`accounts`、`model_aliases`。
|
||||
如果同时发送 `api_keys` 与 `keys`,优先保留 `api_keys` 中的结构化 `name` / `remark`;`keys` 仅作为旧格式兼容回退。
|
||||
|
||||
**请求**:
|
||||
@@ -697,9 +728,9 @@ data: {"type":"message_stop"}
|
||||
"accounts": [
|
||||
{"email": "user@example.com", "password": "pwd", "token": ""}
|
||||
],
|
||||
"claude_mapping": {
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner"
|
||||
"model_aliases": {
|
||||
"claude-sonnet-4-6": "deepseek-v4-flash",
|
||||
"claude-opus-4-6": "deepseek-v4-pro"
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -711,10 +742,10 @@ data: {"type":"message_stop"}
|
||||
- `success`
|
||||
- `admin`(`has_password_hash`、`jwt_expire_hours`、`jwt_valid_after_unix`、`default_password_warning`)
|
||||
- `runtime`(`account_max_inflight`、`account_max_queue`、`global_max_inflight`、`token_refresh_interval_hours`)
|
||||
- `compat`(`wide_input_strict_output`、`strip_reference_markers`)
|
||||
- `responses` / `embeddings`
|
||||
- `auto_delete`(`mode`:`none` / `single` / `all`;旧配置 `sessions=true` 仍按 `all` 处理)
|
||||
- `claude_mapping` / `model_aliases`
|
||||
- `current_input_file`(`enabled` 默认返回 `true`、`min_chars`)
|
||||
- `model_aliases`
|
||||
- `env_backed`、`needs_vercel_sync`
|
||||
- `toolcall` 策略已固定为 `feature_match + high`,不再通过 settings 返回或修改
|
||||
|
||||
@@ -724,11 +755,10 @@ data: {"type":"message_stop"}
|
||||
|
||||
- `admin.jwt_expire_hours`
|
||||
- `runtime.account_max_inflight` / `runtime.account_max_queue` / `runtime.global_max_inflight` / `runtime.token_refresh_interval_hours`
|
||||
- `compat.wide_input_strict_output` / `compat.strip_reference_markers`
|
||||
- `responses.store_ttl_seconds`
|
||||
- `embeddings.provider`
|
||||
- `auto_delete.mode`
|
||||
- `claude_mapping`
|
||||
- `current_input_file.enabled` / `current_input_file.min_chars`
|
||||
- `model_aliases`
|
||||
- `toolcall` 策略已固定,不再作为可写入字段
|
||||
|
||||
@@ -753,9 +783,9 @@ data: {"type":"message_stop"}
|
||||
|
||||
请求可直接传配置对象,或使用 `{"config": {...}, "mode":"merge"}` 包裹格式。
|
||||
也支持在查询参数里传 `?mode=merge` / `?mode=replace`。
|
||||
导入时会接受 `keys`、`api_keys`、`accounts`、`claude_mapping` / `claude_model_mapping`、`model_aliases`、`admin`、`runtime`、`responses`、`embeddings`、`auto_delete` 等字段;`toolcall` 相关字段会被忽略。
|
||||
`replace` 模式会按完整配置结构替换(保留 Vercel 同步元信息);`merge` 模式会合并 `keys`、`api_keys`、`accounts`、`model_aliases`,并覆盖 `admin`、`runtime`、`responses`、`embeddings` 中的非空字段。`auto_delete`、`current_input_file` 建议通过 `/admin/settings` 或配置文件管理;`compat` 与 `toolcall` 相关字段会被忽略。
|
||||
|
||||
> `compat` 相关字段请通过 `/admin/settings` 或配置文件管理;该导入接口不会更新 `compat`。
|
||||
> 注意:`merge` 模式不会更新 `auto_delete`、`current_input_file`。
|
||||
|
||||
### `GET /admin/config/export`
|
||||
|
||||
@@ -907,7 +937,7 @@ data: {"type":"message_stop"}
|
||||
| 字段 | 必填 | 说明 |
|
||||
| --- | --- | --- |
|
||||
| `identifier` | ✅ | email / mobile / token-only 合成标识 |
|
||||
| `model` | ❌ | 默认 `deepseek-chat` |
|
||||
| `model` | ❌ | 默认 `deepseek-v4-flash` |
|
||||
| `message` | ❌ | 空字符串时仅测试会话创建 |
|
||||
|
||||
**响应**:
|
||||
@@ -918,14 +948,17 @@ data: {"type":"message_stop"}
|
||||
"success": true,
|
||||
"response_time": 1240,
|
||||
"message": "API 测试成功(仅会话创建)",
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"session_count": 0,
|
||||
"config_writable": true
|
||||
"config_writable": true,
|
||||
"config_warning": ""
|
||||
}
|
||||
```
|
||||
|
||||
如果传入 `message`,还会附带 `thinking`(当上游返回思考内容时)。
|
||||
|
||||
当部署环境配置文件路径不可写(例如容器内默认 `/app/config.json` 只读)时,登录与会话测试仍可继续;此时会返回 `config_warning` 提示 token 仅保存在内存、重启后丢失。
|
||||
|
||||
### `POST /admin/accounts/test-all`
|
||||
|
||||
可选请求字段:`model`
|
||||
@@ -988,7 +1021,7 @@ data: {"type":"message_stop"}
|
||||
|
||||
| 字段 | 必填 | 默认值 |
|
||||
| --- | --- | --- |
|
||||
| `model` | ❌ | `deepseek-chat` |
|
||||
| `model` | ❌ | `deepseek-v4-flash` |
|
||||
| `message` | ❌ | `你好` |
|
||||
| `api_key` | ❌ | 配置中第一个 key |
|
||||
|
||||
@@ -1012,7 +1045,7 @@ data: {"type":"message_stop"}
|
||||
| --- | --- | --- | --- |
|
||||
| `message` | 否 | `你好` | 便捷单轮用户消息 |
|
||||
| `messages` | 否 | 自动由 `message` 生成 | OpenAI 风格消息数组 |
|
||||
| `model` | 否 | `deepseek-chat` | 目标模型 |
|
||||
| `model` | 否 | `deepseek-v4-flash` | 目标模型 |
|
||||
| `stream` | 否 | `true` | 建议保留流式,以记录原始 SSE |
|
||||
| `api_key` | 否 | 配置中第一个 key | 调用业务接口使用的 key |
|
||||
| `sample_id` | 否 | 自动生成 | 样本目录名 |
|
||||
@@ -1208,7 +1241,7 @@ Gemini 路由使用 Google 风格错误结构:
|
||||
| 状态码 | 说明 |
|
||||
| --- | --- |
|
||||
| `401` | 鉴权失败(key/token 无效,或 Admin JWT 过期) |
|
||||
| `429` | 请求过多(超出并发上限 + 等待队列) |
|
||||
| `429` | 请求过多(超出并发上限 + 等待队列;当前不附带 `Retry-After` 头) |
|
||||
| `503` | 模型不可用或上游服务异常 |
|
||||
|
||||
---
|
||||
@@ -1222,7 +1255,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"messages": [{"role": "user", "content": "你好"}],
|
||||
"stream": false
|
||||
}'
|
||||
@@ -1235,7 +1268,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-reasoner",
|
||||
"model": "deepseek-v4-pro",
|
||||
"messages": [{"role": "user", "content": "解释一下量子纠缠"}],
|
||||
"stream": true
|
||||
}'
|
||||
@@ -1248,7 +1281,7 @@ curl http://localhost:5001/v1/responses \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "gpt-5-codex",
|
||||
"model": "gpt-5.3-codex",
|
||||
"input": "写一个 golang 的 hello world",
|
||||
"stream": true
|
||||
}'
|
||||
@@ -1273,7 +1306,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat-search",
|
||||
"model": "deepseek-v4-flash-search",
|
||||
"messages": [{"role": "user", "content": "今天的新闻"}],
|
||||
"stream": true
|
||||
}'
|
||||
@@ -1286,7 +1319,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"messages": [{"role": "user", "content": "北京今天天气怎么样?"}],
|
||||
"tools": [
|
||||
{
|
||||
@@ -1347,7 +1380,7 @@ curl http://localhost:5001/anthropic/v1/messages \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "anthropic-version: 2023-06-01" \
|
||||
-d '{
|
||||
"model": "claude-sonnet-4-5",
|
||||
"model": "claude-sonnet-4-6",
|
||||
"max_tokens": 1024,
|
||||
"messages": [{"role": "user", "content": "你好"}]
|
||||
}'
|
||||
@@ -1384,7 +1417,7 @@ curl http://localhost:5001/v1/chat/completions \
|
||||
-H "X-Ds2-Target-Account: user@example.com" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "deepseek-chat",
|
||||
"model": "deepseek-v4-flash",
|
||||
"messages": [{"role": "user", "content": "你好"}]
|
||||
}'
|
||||
```
|
||||
|
||||
128
CODE_OF_CONDUCT.md
Normal file
128
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
cjackhwang@qq.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
||||
16
Dockerfile
16
Dockerfile
@@ -3,6 +3,7 @@ FROM node:24 AS webui-builder
|
||||
WORKDIR /app/webui
|
||||
COPY webui/package.json webui/package-lock.json ./
|
||||
RUN npm ci
|
||||
COPY config.example.json /app/config.example.json
|
||||
COPY webui ./
|
||||
RUN npm run build
|
||||
|
||||
@@ -19,7 +20,7 @@ RUN set -eux; \
|
||||
GOARCH="${TARGETARCH:-$(go env GOARCH)}"; \
|
||||
BUILD_VERSION_RESOLVED="${BUILD_VERSION:-}"; \
|
||||
if [ -z "${BUILD_VERSION_RESOLVED}" ] && [ -f VERSION ]; then BUILD_VERSION_RESOLVED="$(cat VERSION | tr -d "[:space:]")"; fi; \
|
||||
CGO_ENABLED=0 GOOS="${GOOS}" GOARCH="${GOARCH}" go build -ldflags="-s -w -X ds2api/internal/version.BuildVersion=${BUILD_VERSION_RESOLVED}" -o /out/ds2api ./cmd/ds2api
|
||||
CGO_ENABLED=0 GOOS="${GOOS}" GOARCH="${GOARCH}" go build -buildvcs=false -ldflags="-s -w -X ds2api/internal/version.BuildVersion=${BUILD_VERSION_RESOLVED}" -o /out/ds2api ./cmd/ds2api
|
||||
|
||||
FROM busybox:1.36.1-musl AS busybox-tools
|
||||
|
||||
@@ -27,6 +28,8 @@ FROM debian:bookworm-slim AS runtime-base
|
||||
WORKDIR /app
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends ca-certificates \
|
||||
&& groupadd -r ds2api && useradd -r -g ds2api -d /app -s /sbin/nologin ds2api \
|
||||
&& mkdir -p /app/data /data && chown -R ds2api:ds2api /app /data \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
COPY --from=busybox-tools /bin/busybox /usr/local/bin/busybox
|
||||
EXPOSE 5001
|
||||
@@ -35,8 +38,9 @@ CMD ["/usr/local/bin/ds2api"]
|
||||
FROM runtime-base AS runtime-from-source
|
||||
COPY --from=go-builder /out/ds2api /usr/local/bin/ds2api
|
||||
|
||||
COPY --from=go-builder /app/config.example.json /app/config.example.json
|
||||
COPY --from=webui-builder /app/static/admin /app/static/admin
|
||||
COPY --from=go-builder --chown=ds2api:ds2api /app/config.example.json /app/config.example.json
|
||||
COPY --from=webui-builder --chown=ds2api:ds2api /app/static/admin /app/static/admin
|
||||
USER ds2api
|
||||
|
||||
FROM busybox-tools AS dist-extract
|
||||
ARG TARGETARCH
|
||||
@@ -53,14 +57,14 @@ RUN set -eux; \
|
||||
test -n "${PKG_DIR}"; \
|
||||
mkdir -p /out/static; \
|
||||
cp "${PKG_DIR}/ds2api" /out/ds2api; \
|
||||
|
||||
cp "${PKG_DIR}/config.example.json" /out/config.example.json; \
|
||||
cp -R "${PKG_DIR}/static/admin" /out/static/admin
|
||||
|
||||
FROM runtime-base AS runtime-from-dist
|
||||
COPY --from=dist-extract /out/ds2api /usr/local/bin/ds2api
|
||||
|
||||
COPY --from=dist-extract /out/config.example.json /app/config.example.json
|
||||
COPY --from=dist-extract /out/static/admin /app/static/admin
|
||||
COPY --from=dist-extract --chown=ds2api:ds2api /out/config.example.json /app/config.example.json
|
||||
COPY --from=dist-extract --chown=ds2api:ds2api /out/static/admin /app/static/admin
|
||||
USER ds2api
|
||||
|
||||
FROM runtime-from-source AS final
|
||||
|
||||
308
README.MD
308
README.MD
@@ -4,17 +4,20 @@
|
||||
|
||||
# DS2API
|
||||
|
||||
<a href="https://trendshift.io/repositories/24508" target="_blank"><img src="https://trendshift.io/api/badge/repositories/24508" alt="CJackHwang%2Fds2api | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
[](LICENSE)
|
||||

|
||||

|
||||
[](https://github.com/CJackHwang/ds2api/releases)
|
||||
[](docs/DEPLOY.md)
|
||||
|
||||
[](https://zeabur.com/templates/L4CFHP)
|
||||
[](https://vercel.com/new/clone?repository-url=https://github.com/CJackHwang/ds2api)
|
||||
|
||||
语言 / Language: [中文](README.MD) | [English](README.en.md)
|
||||
|
||||
将 DeepSeek Web 对话能力转换为 OpenAI、Claude 与 Gemini 兼容 API。后端为 **Go 全量实现**,前端为 React WebUI 管理台(源码在 `webui/`,部署时自动构建到 `static/admin`)。
|
||||
将 DeepSeek Web 对话能力转换为 OpenAI、Claude 与 Gemini 兼容 API。核心后端以 **Go** 实现,Vercel 流式桥接额外使用少量 Node Runtime,前端为 React WebUI 管理台(源码在 `webui/`,部署时自动构建到 `static/admin`)。
|
||||
|
||||
文档入口:[文档导航](docs/README.md) / [架构说明](docs/ARCHITECTURE.md) / [接口文档](API.md)
|
||||
|
||||
@@ -28,6 +31,30 @@
|
||||
>
|
||||
> 请勿将本项目用于违反服务条款、协议、法律法规或平台规则的场景。商业使用前请自行确认 `LICENSE`、相关协议以及你是否获得了作者的书面许可。
|
||||
|
||||
## 目录
|
||||
|
||||
- [架构概览(摘要)](#架构概览摘要)
|
||||
- [核心能力](#核心能力)
|
||||
- [平台兼容矩阵](#平台兼容矩阵)
|
||||
- [模型支持](#模型支持)
|
||||
- [OpenAI 接口](#openai-接口get-v1models)
|
||||
- [Claude 接口](#claude-接口get-anthropicv1models)
|
||||
- [Gemini 接口](#gemini-接口)
|
||||
- [快速开始](#快速开始)
|
||||
- [方式一:下载 Release 构建包](#方式一下载-release-构建包)
|
||||
- [方式二:Docker 运行](#方式二docker-运行)
|
||||
- [方式三:Vercel 部署](#方式三vercel-部署)
|
||||
- [方式四:本地源码运行](#方式四本地源码运行)
|
||||
- [配置说明](#配置说明)
|
||||
- [鉴权模式](#鉴权模式)
|
||||
- [并发模型](#并发模型)
|
||||
- [Tool Call 适配](#tool-call-适配)
|
||||
- [本地开发抓包工具](#本地开发抓包工具)
|
||||
- [文档索引](#文档索引)
|
||||
- [测试](#测试)
|
||||
- [Release 自动构建(GitHub Actions)](#release-自动构建github-actions)
|
||||
- [免责声明](#免责声明)
|
||||
|
||||
## 架构概览(摘要)
|
||||
|
||||
```mermaid
|
||||
@@ -35,25 +62,28 @@ flowchart LR
|
||||
Client["🖥️ 客户端 / SDK\n(OpenAI / Claude / Gemini)"]
|
||||
Upstream["☁️ DeepSeek API"]
|
||||
|
||||
subgraph DS2API["DS2API 3.x(统一 OpenAI 内核)"]
|
||||
subgraph DS2API["DS2API 4.x(模块化 HTTP surface + PromptCompat 内核)"]
|
||||
Router["chi Router + 中间件\n(RequestID / RealIP / Logger / Recoverer / CORS)"]
|
||||
|
||||
subgraph Adapters["协议适配层"]
|
||||
OA["OpenAI\n/v1/*"]
|
||||
subgraph HTTP["HTTP API surface"]
|
||||
OA["OpenAI\nchat / responses / files / embeddings"]
|
||||
CA["Claude\n/anthropic/* + /v1/messages"]
|
||||
GA["Gemini\n/v1beta/models/* + /v1/models/*"]
|
||||
Admin["Admin API\n/admin/*"]
|
||||
Admin["Admin API\n资源子包"]
|
||||
WebUI["WebUI\n/admin(静态托管)"]
|
||||
Vercel["Vercel Node Stream\n/v1/chat/completions"]
|
||||
end
|
||||
|
||||
subgraph Runtime["运行时核心能力"]
|
||||
Bridge["CLIProxy 转换桥\n(多协议 <-> OpenAI)"]
|
||||
OAEngine["OpenAI ChatCompletions\n(统一工具调用与流式语义)"]
|
||||
Compat["PromptCompat\n(API -> 网页纯文本上下文)"]
|
||||
Completion["Completion Runtime\n(Session / PoW / Completion)"]
|
||||
Turn["AssistantTurn\n(输出语义归一)"]
|
||||
Auth["Auth Resolver\n(API key / bearer / x-goog-api-key)"]
|
||||
Pool["Account Pool + Queue\n(并发槽位 + 等待队列)"]
|
||||
DSClient["DeepSeek Client\n(Session / Auth / HTTP)"]
|
||||
Pow["PoW 实现\n(纯 Go 毫秒级)"]
|
||||
DSClient["DeepSeek Client\n(Session / Auth / Completion / Files)"]
|
||||
Pow["PoW 实现\n(纯 Go)"]
|
||||
Tool["Tool Sieve\n(Go/Node 语义对齐)"]
|
||||
History["Current Input File\n(DS2API_HISTORY.txt)"]
|
||||
end
|
||||
end
|
||||
|
||||
@@ -61,19 +91,24 @@ flowchart LR
|
||||
Router --> OA & CA & GA
|
||||
Router --> Admin
|
||||
Router --> WebUI
|
||||
Router --> Vercel
|
||||
|
||||
OA --> OAEngine
|
||||
CA & GA --> Bridge
|
||||
Bridge --> OAEngine
|
||||
OAEngine --> Auth
|
||||
OAEngine -.账号轮询.-> Pool
|
||||
OAEngine -.工具调用解析.-> Tool
|
||||
OAEngine -.PoW 计算.-> Pow
|
||||
OA --> Compat
|
||||
CA & GA --> Compat
|
||||
Compat --> Completion
|
||||
Completion -.完整上下文.-> History
|
||||
Completion --> Turn
|
||||
Vercel -.Go prepare.-> Completion
|
||||
Vercel -.Node SSE.-> Tool
|
||||
Completion --> Auth
|
||||
Completion -.账号轮询.-> Pool
|
||||
Completion -.工具调用解析.-> Tool
|
||||
Completion -.PoW 计算.-> Pow
|
||||
Auth --> DSClient
|
||||
DSClient --> Upstream
|
||||
Upstream --> DSClient
|
||||
OAEngine --> Bridge
|
||||
Bridge --> Client
|
||||
Turn --> Client
|
||||
Vercel --> Client
|
||||
```
|
||||
|
||||
详细架构拆分与目录职责见 [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)。
|
||||
@@ -86,9 +121,10 @@ flowchart LR
|
||||
|
||||
| 能力 | 说明 |
|
||||
| --- | --- |
|
||||
| OpenAI 兼容 | `GET /v1/models`、`GET /v1/models/{id}`、`POST /v1/chat/completions`、`POST /v1/responses`、`GET /v1/responses/{response_id}`、`POST /v1/embeddings`、`POST /v1/files` |
|
||||
| OpenAI 兼容 | `GET /v1/models`、`GET /v1/models/{id}`、`POST /v1/chat/completions`、`POST /v1/responses`、`GET /v1/responses/{response_id}`、`POST /v1/embeddings`、`POST /v1/files`、`GET /v1/files/{file_id}` |
|
||||
| Claude 兼容 | `GET /anthropic/v1/models`、`POST /anthropic/v1/messages`、`POST /anthropic/v1/messages/count_tokens`(及快捷路径 `/v1/messages`、`/messages`) |
|
||||
| Gemini 兼容 | `POST /v1beta/models/{model}:generateContent`、`POST /v1beta/models/{model}:streamGenerateContent`(及 `/v1/models/{model}:*` 路径) |
|
||||
| 统一 CORS 兼容 | `/v1/*`、`/anthropic/*`、`/v1beta/models/*`、`/admin/*` 统一走同一套 CORS 策略;Vercel 上 `/v1/chat/completions` 的 Node Runtime 也对齐相同放行规则,尽量减少第三方预检请求头限制 |
|
||||
| 多账号轮询 | 自动 token 刷新、邮箱/手机号双登录方式 |
|
||||
| 并发队列控制 | 每账号 in-flight 上限 + 等待队列,动态计算建议并发值 |
|
||||
| DeepSeek PoW | 纯 Go 高性能实现(DeepSeekHashV1),毫秒级响应 |
|
||||
@@ -97,6 +133,8 @@ flowchart LR
|
||||
| WebUI 管理台 | `/admin` 单页应用(中英文双语、深色模式,支持查看服务器端对话记录) |
|
||||
| 运维探针 | `GET /healthz`(存活)、`GET /readyz`(就绪) |
|
||||
|
||||
OpenAI `/v1/*` 仍是推荐的规范路径;同时支持 `/models`、`/chat/completions`、`/responses`、`/embeddings`、`/files`、`/files/{file_id}` 等根路径快捷路由,方便只配置 DS2API 根地址的第三方客户端。
|
||||
|
||||
## 平台兼容矩阵
|
||||
|
||||
| 级别 | 平台 | 当前状态 |
|
||||
@@ -114,42 +152,44 @@ flowchart LR
|
||||
|
||||
| 模型类型 | 模型 ID | thinking | search |
|
||||
| --- | --- | --- | --- |
|
||||
| default | `deepseek-chat` | ❌ | ❌ |
|
||||
| default | `deepseek-reasoner` | ✅ | ❌ |
|
||||
| default | `deepseek-chat-search` | ❌ | ✅ |
|
||||
| default | `deepseek-reasoner-search` | ✅ | ✅ |
|
||||
| expert | `deepseek-expert-chat` | ❌ | ❌ |
|
||||
| expert | `deepseek-expert-reasoner` | ✅ | ❌ |
|
||||
| expert | `deepseek-expert-chat-search` | ❌ | ✅ |
|
||||
| expert | `deepseek-expert-reasoner-search` | ✅ | ✅ |
|
||||
| vision | `deepseek-vision-chat` | ❌ | ❌ |
|
||||
| vision | `deepseek-vision-reasoner` | ✅ | ❌ |
|
||||
| vision | `deepseek-vision-chat-search` | ❌ | ✅ |
|
||||
| vision | `deepseek-vision-reasoner-search` | ✅ | ✅ |
|
||||
| default | `deepseek-v4-flash` | 默认开启,可由请求参数控制 | ❌ |
|
||||
| default | `deepseek-v4-flash-nothinking` | 永久关闭,不受请求参数影响 | ❌ |
|
||||
| expert | `deepseek-v4-pro` | 默认开启,可由请求参数控制 | ❌ |
|
||||
| expert | `deepseek-v4-pro-nothinking` | 永久关闭,不受请求参数影响 | ❌ |
|
||||
| default | `deepseek-v4-flash-search` | 默认开启,可由请求参数控制 | ✅ |
|
||||
| default | `deepseek-v4-flash-search-nothinking` | 永久关闭,不受请求参数影响 | ✅ |
|
||||
| expert | `deepseek-v4-pro-search` | 默认开启,可由请求参数控制 | ✅ |
|
||||
| expert | `deepseek-v4-pro-search-nothinking` | 永久关闭,不受请求参数影响 | ✅ |
|
||||
| vision | `deepseek-v4-vision` | 默认开启,可由请求参数控制 | ❌ |
|
||||
| vision | `deepseek-v4-vision-nothinking` | 永久关闭,不受请求参数影响 | ❌ |
|
||||
|
||||
除原生模型外,也支持常见 alias 输入(如 `gpt-5`、`gpt-5-mini`、`gpt-5-codex`、`gpt-4.1`、`o3`、`claude-opus-4-6`、`claude-sonnet-4-5`、`gemini-2.5-pro`、`gemini-2.5-flash` 等),但 `/v1/models` 返回的是规范化后的 DeepSeek 原生模型 ID。
|
||||
除原生模型外,也支持常见 alias 输入(如 `gpt-4.1`、`gpt-5`、`gpt-5-codex`、`o3`、`claude-*`、`gemini-*` 等),但 `/v1/models` 返回的是规范化后的 DeepSeek 原生模型 ID。若 alias 名本身追加 `-nothinking` 后缀,也会映射到对应的强制关思考模型。完整 alias 行为以 [API.md](API.md#模型-alias-解析策略) 和 `config.example.json` 为准。
|
||||
当前上游视觉模型只暴露 `vision` 通道,不提供独立的联网搜索视觉变体。
|
||||
|
||||
### Claude 接口(`GET /anthropic/v1/models`)
|
||||
|
||||
| 当前常用模型 | 默认映射 |
|
||||
| --- | --- |
|
||||
| `claude-sonnet-4-5` | `deepseek-chat` |
|
||||
| `claude-haiku-4-5`(兼容 `claude-3-5-haiku-latest`) | `deepseek-chat` |
|
||||
| `claude-opus-4-6` | `deepseek-reasoner` |
|
||||
| `claude-sonnet-4-6` | `deepseek-v4-flash` |
|
||||
| `claude-sonnet-4-6-nothinking` | `deepseek-v4-flash-nothinking` |
|
||||
| `claude-haiku-4-5`(兼容 `claude-3-5-haiku-latest`) | `deepseek-v4-flash` |
|
||||
| `claude-haiku-4-5-nothinking` | `deepseek-v4-flash-nothinking` |
|
||||
| `claude-opus-4-6` | `deepseek-v4-pro` |
|
||||
| `claude-opus-4-6-nothinking` | `deepseek-v4-pro-nothinking` |
|
||||
|
||||
可通过配置中的 `claude_mapping` 或 `claude_model_mapping` 覆盖映射关系。
|
||||
`/anthropic/v1/models` 除上述当前主别名外,还会返回 Claude 4.x snapshots,以及 3.x / 2.x / 1.x 历史模型 ID 与常见 alias,便于旧客户端直接兼容。
|
||||
可通过配置中的 `model_aliases` 覆盖映射关系;若请求模型名带 `-nothinking`,会在最终映射结果上强制追加无思考语义。
|
||||
`/anthropic/v1/models` 除上述主别名外,还会返回 Claude 4.x snapshots、3.x 历史模型 ID 与常见 alias,便于旧客户端直接兼容。
|
||||
|
||||
#### Claude Code 接入避坑(实测)
|
||||
|
||||
- `ANTHROPIC_BASE_URL` 推荐直接指向 DS2API 根地址(例如 `http://127.0.0.1:5001`),Claude Code 会请求 `/v1/messages?beta=true`。
|
||||
- `ANTHROPIC_API_KEY` 需要与 `config.json` 中 `keys` 一致;建议同时保留常规 key 与 `sk-ant-*` 形态 key,兼容不同客户端校验习惯。
|
||||
- 若系统设置了代理,建议对 DS2API 地址配置 `NO_PROXY=127.0.0.1,localhost,<你的主机IP>`,避免本地回环请求被代理拦截。
|
||||
- 如遇“工具调用输出成文本、未执行”问题,请优先检查模型输出是否为受支持的 XML/Markup 工具块(例如 `<tool_call>` / `<function_call>` / `<invoke>` / `tool_use`),而不是纯 JSON `tool_calls` 片段。
|
||||
- 如遇“工具调用输出成文本、未执行”问题,请优先检查模型输出是否为推荐的 DSML 工具块:`<|DSML|tool_calls><|DSML|invoke name="..."><|DSML|parameter name="...">...`。兼容层也接受旧式 canonical XML:`<tool_calls><invoke name="..."><parameter name="...">...`;旧式 `<tools>` / `<tool_call>` / `<tool_name>` / `<param>`、`<function_call>`、`tool_use` 或纯 JSON `tool_calls` 片段不会执行。
|
||||
|
||||
### Gemini 接口
|
||||
|
||||
Gemini 适配器将模型名通过 `model_aliases` 或内置规则映射到 DeepSeek 原生模型,支持 `generateContent` 和 `streamGenerateContent` 两种调用方式,并完整支持 Tool Calling(`functionDeclarations` → `functionCall` 输出)。
|
||||
Gemini 适配器将模型名通过 `model_aliases` 或内置规则映射到 DeepSeek 原生模型,支持 `generateContent` 和 `streamGenerateContent` 两种调用方式,并完整支持 Tool Calling(`functionDeclarations` → `functionCall` 输出)。若 Gemini 模型名带 `-nothinking` 后缀,例如 `gemini-2.5-pro-nothinking`,会映射到对应的强制关闭思考模型。
|
||||
|
||||
## 快速开始
|
||||
|
||||
@@ -208,6 +248,8 @@ docker-compose logs -f
|
||||
```
|
||||
|
||||
默认 `docker-compose.yml` 会把宿主机 `6011` 映射到容器内的 `5001`。如果你希望直接对外暴露 `5001`,请设置 `DS2API_HOST_PORT=5001`(或者手动调整 `ports` 配置)。
|
||||
同时默认把 `./config.json` 挂载到容器 `/data/config.json`,并设置 `DS2API_CONFIG_PATH=/data/config.json`,用于避免 `/app` 只读导致运行时 token 持久化失败。
|
||||
镜像会预创建 `/data` 并授权给非 root 的 `ds2api` 用户;如果使用单文件 bind mount,请确保宿主机 `config.json` 对容器用户可读写,例如 `chmod 644 config.json`。
|
||||
|
||||
更新镜像:`docker-compose up -d --build`
|
||||
|
||||
@@ -217,6 +259,10 @@ docker-compose logs -f
|
||||
2. 部署完成后访问 `/admin`,使用 Zeabur 环境变量/模板指引中的 `DS2API_ADMIN_KEY` 登录。
|
||||
3. 在管理台导入/编辑配置(会写入并持久化到 `/data/config.json`)。
|
||||
|
||||
Zeabur 首次空卷启动时可以没有 `/data/config.json`;DS2API 会先使用空的文件模式配置启动,并在管理台首次保存时创建该文件。
|
||||
|
||||
不依赖模板手动部署时,在 Zeabur 中选择 GitHub 仓库服务,Root Directory 保持 `/`,使用仓库根目录 `Dockerfile` 构建;添加持久卷 `/data`,设置 `PORT=5001`、`DS2API_ADMIN_KEY=你的强密钥`、`DS2API_CONFIG_PATH=/data/config.json`,然后暴露 HTTP 端口 `5001`。更完整步骤见 [docs/DEPLOY.md](docs/DEPLOY.md#不使用模板手动部署)。
|
||||
|
||||
说明:Zeabur 使用仓库内 `Dockerfile` 直接构建时,不需要额外传入 `BUILD_VERSION`;镜像会优先读取该构建参数,未提供时自动回退到仓库根目录的 `VERSION` 文件。
|
||||
|
||||
### 方式三:Vercel 部署
|
||||
@@ -239,13 +285,13 @@ cp config.example.json config.json
|
||||
base64 < config.json | tr -d '\n'
|
||||
```
|
||||
|
||||
> **流式说明**:`/v1/chat/completions` 在 Vercel 上默认走 `api/chat-stream.js`(Node Runtime)以保证实时 SSE。鉴权、账号选择、会话/PoW 准备仍由 Go 内部 prepare 接口完成;流式响应(含 `tools`)在 Node 侧执行与 Go 对齐的输出组装与防泄漏处理。
|
||||
> **流式说明**:`/v1/chat/completions` 在 Vercel 上默认走 `api/chat-stream.js`(Node Runtime)以保证实时 SSE。鉴权、账号选择、会话/PoW 准备仍由 Go 内部 prepare 接口完成;流式响应(含 `tools`)在 Node 侧执行与 Go 对齐的输出组装与防泄漏处理。虽然这里只有 OpenAI chat 流式走 Node,但 CORS 放行策略仍与 Go 主路由保持一致,统一覆盖第三方客户端预检场景。
|
||||
|
||||
详细部署说明请参阅 [部署指南](docs/DEPLOY.md)。
|
||||
|
||||
### 方式四:本地源码运行
|
||||
|
||||
**前置要求**:Go 1.26+,Node.js `20.19+` 或 `22.12+`(仅在需要构建 WebUI 时)
|
||||
**前置要求**:Go 1.26+,Node.js `20.19+` 或 `22.12+`(仅在需要构建 WebUI 时);同时确保 `npm` 可用,建议 `npm 10+`
|
||||
|
||||
```bash
|
||||
# 1. 克隆仓库
|
||||
@@ -264,127 +310,24 @@ go run ./cmd/ds2api
|
||||
|
||||
服务实际绑定:`0.0.0.0:5001`,因此同一局域网设备通常也可以通过你的内网 IP 访问。
|
||||
|
||||
> **WebUI 自动构建**:本地首次启动时,若 `static/admin` 不存在,会自动尝试执行 `npm ci`(仅在缺少依赖时)和 `npm run build -- --outDir static/admin --emptyOutDir`(需要本机有 Node.js)。你也可以手动构建:`./scripts/build-webui.sh`
|
||||
> **WebUI 自动构建**:本地首次启动时,若 `static/admin` 不存在,会自动尝试执行 `npm ci`(仅在缺少依赖时)和 `npm run build -- --outDir static/admin --emptyOutDir`(需要本机有 Node.js 和 npm)。你也可以手动构建:`./scripts/build-webui.sh`
|
||||
|
||||
## 配置说明
|
||||
|
||||
### `config.json` 示例
|
||||
`README` 只保留快速入口,完整字段请以 [config.example.json](config.example.json) 为模板,并参考 [部署指南](docs/DEPLOY.md#0-前置要求) 与 [API 配置最佳实践](API.md#配置最佳实践)。
|
||||
|
||||
```json
|
||||
{
|
||||
"keys": ["your-api-key-1", "your-api-key-2"],
|
||||
"api_keys": [
|
||||
{
|
||||
"key": "your-api-key-1",
|
||||
"name": "主 Key",
|
||||
"remark": "生产流量"
|
||||
}
|
||||
],
|
||||
"accounts": [
|
||||
{
|
||||
"name": "账号 A",
|
||||
"remark": "主账号",
|
||||
"email": "user@example.com",
|
||||
"password": "your-password"
|
||||
},
|
||||
{
|
||||
"mobile": "12345678901",
|
||||
"password": "your-password"
|
||||
}
|
||||
],
|
||||
"model_aliases": {
|
||||
"gpt-4o": "deepseek-chat",
|
||||
"gpt-5": "deepseek-chat",
|
||||
"gpt-5-mini": "deepseek-chat",
|
||||
"gpt-5-codex": "deepseek-reasoner",
|
||||
"o3": "deepseek-reasoner",
|
||||
"claude-opus-4-6": "deepseek-reasoner",
|
||||
"gemini-2.5-flash": "deepseek-chat"
|
||||
},
|
||||
"compat": {
|
||||
"wide_input_strict_output": true,
|
||||
"strip_reference_markers": true
|
||||
},
|
||||
"responses": {
|
||||
"store_ttl_seconds": 900
|
||||
},
|
||||
"embeddings": {
|
||||
"provider": "deterministic"
|
||||
},
|
||||
"claude_mapping": {
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner"
|
||||
},
|
||||
"admin": {
|
||||
"jwt_expire_hours": 24
|
||||
},
|
||||
"runtime": {
|
||||
"account_max_inflight": 2,
|
||||
"account_max_queue": 0,
|
||||
"global_max_inflight": 0,
|
||||
"token_refresh_interval_hours": 6
|
||||
},
|
||||
"auto_delete": {
|
||||
"mode": "none"
|
||||
}
|
||||
}
|
||||
```
|
||||
常用字段:
|
||||
|
||||
- `keys`:API 访问密钥列表,客户端通过 `Authorization: Bearer <key>` 鉴权
|
||||
- `api_keys`:推荐使用的新结构化密钥列表,支持 `key` + `name` + `remark`(`keys` 仍兼容)
|
||||
- `accounts`:DeepSeek 账号列表,支持 `email` 或 `mobile` 登录;可额外填写 `name` / `remark` 便于管理
|
||||
- `token`:配置文件中即使填写也会在加载时被清空(不会从 `config.json` 读取 token);实际 token 仅在运行时内存中维护并自动刷新
|
||||
- `model_aliases`:常见模型名(如 GPT/Codex/Claude)到 DeepSeek 模型的映射
|
||||
- `compat.wide_input_strict_output`:建议保持 `true`(当前实现默认宽进严出)
|
||||
- `compat.strip_reference_markers`:建议保持 `true`,用于清理可见输出中的引用/标记
|
||||
- `toolcall`:旧字段,当前实现已固定为特征匹配 + 高置信早发;即使保留在配置里也会被忽略
|
||||
- `responses.store_ttl_seconds`:`/v1/responses/{id}` 的内存缓存 TTL
|
||||
- `embeddings.provider`:embedding 提供方(当前内置 `deterministic/mock/builtin`)
|
||||
- `claude_mapping`:字典中 `fast`/`slow` 后缀映射到对应 DeepSeek 模型(兼容读取 `claude_model_mapping`)
|
||||
- `admin`:管理后台设置(JWT 过期时间、密码哈希等),可通过 Admin Settings API 热更新
|
||||
- `runtime`:运行时参数(并发限制、队列大小、托管账号 token 刷新间隔),可通过 Admin Settings API 热更新;`account_max_queue=0`/`global_max_inflight=0` 表示按推荐值自动计算,`token_refresh_interval_hours=6` 为默认强制重登间隔
|
||||
- `auto_delete.mode`:请求结束后如何清理 DeepSeek 远端聊天记录,支持 `none`(默认,不删除)、`single`(仅删除当前会话)、`all`(清空全部会话);旧配置里的 `auto_delete.sessions=true` 仍会被视为 `all`
|
||||
- `keys` / `api_keys`:客户端访问密钥,`api_keys` 支持 `name` 与 `remark` 元信息,`keys` 继续兼容。
|
||||
- `accounts`:DeepSeek 托管账号,支持 `email` 或 `mobile` 登录,可配置代理、名称和备注。
|
||||
- `model_aliases`:OpenAI / Claude / Gemini 共用的模型 alias 映射。
|
||||
- `runtime`:账号并发、队列与 token 刷新策略,可通过 Admin Settings 热更新。
|
||||
- `auto_delete.mode`:请求结束后的远端会话清理策略,支持 `none` / `single` / `all`。
|
||||
- `current_input_file`:全局生效的上下文拆分上传策略;默认开启且阈值为 `0`,触发时将完整上下文合并上传为 `DS2API_HISTORY.txt` 上下文文件。
|
||||
- 如果关闭 `current_input_file`,请求会直接透传,不上传拆分上下文文件。
|
||||
- `thinking_injection`:默认开启;在最新 user 消息末尾追加思考增强提示词,提高高强度推理与工具调用前的思考稳定性;`prompt` 留空时使用内置默认提示词。
|
||||
|
||||
### 环境变量
|
||||
|
||||
> 建议:长期维护请优先以 `config.json`(或其 Base64)为单一配置源。环境变量仅保留部署必需项;`DS2API_CONFIG_JSON` 主要用于 Vercel/无持久盘场景,后续可能进一步收敛。
|
||||
|
||||
| 变量 | 用途 | 默认值 |
|
||||
| --- | --- | --- |
|
||||
| `PORT` | 服务端口 | `5001` |
|
||||
| `LOG_LEVEL` | 日志级别 | `INFO`(可选:`DEBUG`/`WARN`/`ERROR`) |
|
||||
| `DS2API_ADMIN_KEY` | Admin 登录密钥 | `admin` |
|
||||
| `DS2API_JWT_SECRET` | Admin JWT 签名密钥 | 等同 `DS2API_ADMIN_KEY` |
|
||||
| `DS2API_JWT_EXPIRE_HOURS` | Admin JWT 过期小时数 | `24` |
|
||||
| `DS2API_CONFIG_PATH` | 配置文件路径 | `config.json` |
|
||||
| `DS2API_CONFIG_JSON` | 直接注入配置(JSON 或 Base64) | — |
|
||||
| `DS2API_CHAT_HISTORY_PATH` | 服务器端对话记录文件路径 | `data/chat_history.json` |
|
||||
| `DS2API_ENV_WRITEBACK` | 环境变量模式下自动写回配置文件并切换文件模式(`1/true/yes/on`) | 关闭 |
|
||||
| `DS2API_STATIC_ADMIN_DIR` | 管理台静态文件目录 | `static/admin` |
|
||||
| `DS2API_AUTO_BUILD_WEBUI` | 启动时自动构建 WebUI | 本地开启,Vercel 关闭 |
|
||||
| `DS2API_DEV_PACKET_CAPTURE` | 本地开发抓包开关(记录最近会话请求/响应体) | 本地非 Vercel 默认开启 |
|
||||
| `DS2API_DEV_PACKET_CAPTURE_LIMIT` | 本地抓包保留条数(超出自动淘汰) | `20` |
|
||||
| `DS2API_DEV_PACKET_CAPTURE_MAX_BODY_BYTES` | 单条响应体最大记录字节数 | `5242880` |
|
||||
| `DS2API_ACCOUNT_MAX_INFLIGHT` | 每账号最大并发 in-flight 请求数 | `2` |
|
||||
| `DS2API_ACCOUNT_MAX_QUEUE` | 等待队列上限 | `recommended_concurrency` |
|
||||
| `DS2API_GLOBAL_MAX_INFLIGHT` | 全局最大 in-flight 请求数 | `recommended_concurrency` |
|
||||
| `DS2API_VERCEL_INTERNAL_SECRET` | Vercel 混合流式内部鉴权密钥 | 回退用 `DS2API_ADMIN_KEY` |
|
||||
| `DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS` | 流式 lease 过期秒数 | `900` |
|
||||
| `VERCEL_TOKEN` | Vercel 同步 token | — |
|
||||
| `VERCEL_PROJECT_ID` | Vercel 项目 ID | — |
|
||||
| `VERCEL_TEAM_ID` | Vercel 团队 ID | — |
|
||||
| `DS2API_VERCEL_PROTECTION_BYPASS` | Vercel 部署保护绕过密钥(内部 Node→Go 调用) | — |
|
||||
|
||||
> 提示:当检测到 `DS2API_CONFIG_JSON` 时,管理台会显示当前模式风险与自动持久化状态(含 `DS2API_CONFIG_PATH` 路径与模式切换说明)。
|
||||
|
||||
#### 必填 / 可选(按部署方式)
|
||||
|
||||
- **所有部署都必填**:`DS2API_ADMIN_KEY`
|
||||
- **配置来源二选一(推荐前者)**:
|
||||
- `config.json` 文件(推荐,持久化更直观)
|
||||
- `DS2API_CONFIG_JSON`(可选,适合 Vercel;支持 JSON 或 Base64)
|
||||
- **仅在环境变量配置模式建议开启**:`DS2API_ENV_WRITEBACK=1`(避免管理台改动重启后丢失)
|
||||
- 其余环境变量均为可选调优项。
|
||||
环境变量完整列表见 [部署指南](docs/DEPLOY.md),接口鉴权规则见 [API.md](API.md#鉴权规则)。
|
||||
|
||||
## 鉴权模式
|
||||
|
||||
@@ -396,6 +339,7 @@ go run ./cmd/ds2api
|
||||
| **直通 token 模式** | 传入 token 不在 `config.keys` 中时,直接作为 DeepSeek token 使用 |
|
||||
|
||||
可选请求头 `X-Ds2-Target-Account`:指定使用某个托管账号(值为 email 或 mobile)。
|
||||
如果指定账号不存在,或者当前管理账号队列已满,请求会返回 `429`;当前 `429` 不附带 `Retry-After` 头。若账号存在但登录/刷新失败,则返回对应的鉴权错误。
|
||||
Gemini 路由还可以使用 `x-goog-api-key`,或在没有认证头时使用 `?key=` / `?api_key=` 作为调用方凭据。
|
||||
|
||||
## 并发模型
|
||||
@@ -408,7 +352,7 @@ Gemini 路由还可以使用 `x-goog-api-key`,或在没有认证头时使用 `
|
||||
```
|
||||
|
||||
- 当 in-flight 槽位满时,请求进入等待队列,**不会立即 429**
|
||||
- 超出总承载上限后才返回 `429 Too Many Requests`
|
||||
- 超出总承载上限后才返回 `429 Too Many Requests`,当前响应不附带 `Retry-After`
|
||||
- `GET /admin/queue/status` 返回实时并发状态
|
||||
|
||||
## Tool Call 适配
|
||||
@@ -416,14 +360,14 @@ Gemini 路由还可以使用 `x-goog-api-key`,或在没有认证头时使用 `
|
||||
当请求中带 `tools` 时,DS2API 会做防泄漏处理与结构化转译:
|
||||
|
||||
1. 只在**非代码块上下文**启用执行型 toolcall 识别(代码块示例默认不触发)
|
||||
2. 解析层当前以 XML/Markup 家族为准(`<tool_call>` / `<function_call>` / `<invoke>` / `tool_use` / antml 变体);纯 JSON `tool_calls` 片段默认不作为可执行调用解析
|
||||
2. 解析层当前把 DSML 外壳视为推荐可执行调用:`<|DSML|tool_calls>` → `<|DSML|invoke name="...">` → `<|DSML|parameter name="...">`;兼容旧式 canonical XML `<tool_calls>` → `<invoke name="...">` → `<parameter name="...">`。DSML 只是外壳别名,内部仍以 XML 解析语义为准;旧式 `<tools>` / `<tool_call>` / `<tool_name>` / `<param>`、`<function_call>`、`tool_use` / antml 变体与纯 JSON `tool_calls` 片段都会按普通文本处理
|
||||
3. `responses` 流式严格使用官方 item 生命周期事件(`response.output_item.*`、`response.content_part.*`、`response.function_call_arguments.*`)
|
||||
4. `responses` 支持并执行 `tool_choice`(`auto`/`none`/`required`/强制函数);`required` 违规时非流式返回 `422`,流式返回 `response.failed`
|
||||
5. 客户端请求哪种协议,就按该协议返回工具调用(OpenAI/Claude/Gemini 各自原生结构);模型侧优先约束输出规范 XML,再由兼容层转译
|
||||
|
||||
> 说明:当前版本在 parser 层仍以“尽量解析成功”为优先,未启用基于 allow-list 的工具名硬拒绝。
|
||||
> 说明:当前版本 parser 层以“尽量解析成功”为优先,所有格式合法的 XML 工具调用都会通过,不做工具名 allow-list 过滤。
|
||||
>
|
||||
> 想评估“把工具调用封装成 XML 再输入模型”的方案,可参考:`docs/toolcall-semantics.md`。
|
||||
> 想评估“把工具调用封装成 XML 再输入模型”的方案,可参考:`docs/toolcall-semantics.md`。
|
||||
|
||||
## 本地开发抓包工具
|
||||
|
||||
@@ -467,44 +411,18 @@ go run ./cmd/ds2api
|
||||
|
||||
## 测试
|
||||
|
||||
```bash
|
||||
# 单元测试(Go + Node)
|
||||
./tests/scripts/run-unit-all.sh
|
||||
|
||||
# 一键端到端全链路测试(真实账号,生成完整请求/响应日志)
|
||||
./tests/scripts/run-live.sh
|
||||
|
||||
# 或自定义参数
|
||||
go run ./cmd/ds2api-tests \
|
||||
--config config.json \
|
||||
--admin-key admin \
|
||||
--out artifacts/testsuite \
|
||||
--timeout 120 \
|
||||
--retries 2
|
||||
```
|
||||
|
||||
```bash
|
||||
# 发布前阻断门禁
|
||||
./tests/scripts/check-stage6-manual-smoke.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
npm ci --prefix webui && npm run build --prefix webui
|
||||
```
|
||||
|
||||
## 测试
|
||||
|
||||
详细测试指南请参阅 [docs/TESTING.md](docs/TESTING.md)。
|
||||
|
||||
### 快速测试命令
|
||||
|
||||
```bash
|
||||
# 运行所有单元测试
|
||||
go test ./...
|
||||
# 本地 PR 门禁
|
||||
./scripts/lint.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
npm run build --prefix webui
|
||||
|
||||
# 运行 tool calls 相关测试(调试工具调用问题)
|
||||
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/toolcall/
|
||||
|
||||
# 运行端到端测试
|
||||
# 端到端全链路测试(真实账号,生成完整请求/响应日志)
|
||||
./tests/scripts/run-live.sh
|
||||
```
|
||||
|
||||
@@ -512,10 +430,10 @@ go test -v -run 'TestParseToolCalls|TestRepair' ./internal/toolcall/
|
||||
|
||||
工作流文件:`.github/workflows/release-artifacts.yml`
|
||||
|
||||
- **触发条件**:仅在 GitHub Release `published` 时触发(普通 push 不会触发)
|
||||
- **构建产物**:多平台二进制包(`linux/amd64`、`linux/arm64`、`darwin/amd64`、`darwin/arm64`、`windows/amd64`)+ `sha256sums.txt`
|
||||
- **触发条件**:默认仅在 GitHub Release `published` 时自动触发;也支持在 Actions 页面手动 `workflow_dispatch`,并填写 `release_tag` 复跑/补发
|
||||
- **构建产物**:多平台二进制包(`linux/amd64`、`linux/arm64`、`linux/armv7`、`darwin/amd64`、`darwin/arm64`、`windows/amd64`、`windows/arm64`)、Linux Docker 镜像导出包 + `sha256sums.txt`
|
||||
- **容器镜像发布**:仅推送到 GHCR(`ghcr.io/cjackhwang/ds2api`)
|
||||
- **每个压缩包包含**:`ds2api` 可执行文件、`static/admin`、WASM 文件(同时支持内置 fallback)、`config.example.json` 配置示例、README、LICENSE
|
||||
- **每个二进制压缩包包含**:`ds2api` 可执行文件、`static/admin`、`config.example.json`、`.env.example`、`README.MD`、`README.en.md`、`LICENSE`
|
||||
|
||||
## 免责声明
|
||||
|
||||
|
||||
250
README.en.md
250
README.en.md
@@ -4,6 +4,8 @@
|
||||
|
||||
# DS2API
|
||||
|
||||
<a href="https://trendshift.io/repositories/24508" target="_blank"><img src="https://trendshift.io/api/badge/repositories/24508" alt="CJackHwang%2Fds2api | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
[](LICENSE)
|
||||

|
||||

|
||||
@@ -14,7 +16,7 @@
|
||||
|
||||
Language: [中文](README.MD) | [English](README.en.md)
|
||||
|
||||
DS2API converts DeepSeek Web chat capability into OpenAI-compatible, Claude-compatible, and Gemini-compatible APIs. The backend is a **pure Go implementation**, with a React WebUI admin panel (source in `webui/`, build output auto-generated to `static/admin` during deployment).
|
||||
DS2API converts DeepSeek Web chat capability into OpenAI-compatible, Claude-compatible, and Gemini-compatible APIs. The core backend is Go-based, with a small Node Runtime bridge used for Vercel streaming, and the React WebUI admin panel lives in `webui/` (build output auto-generated to `static/admin` during deployment).
|
||||
|
||||
Documentation entry: [Docs Index](docs/README.md) / [Architecture](docs/ARCHITECTURE.en.md) / [API Reference](API.en.md)
|
||||
|
||||
@@ -26,6 +28,30 @@ Documentation entry: [Docs Index](docs/README.md) / [Architecture](docs/ARCHITEC
|
||||
>
|
||||
> Do not use this project in ways that violate service terms, agreements, laws, or platform rules. Before any commercial use, review the `LICENSE`, the relevant terms, and confirm that you have the author's written permission.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Architecture Overview (Summary)](#architecture-overview-summary)
|
||||
- [Key Capabilities](#key-capabilities)
|
||||
- [Platform Compatibility Matrix](#platform-compatibility-matrix)
|
||||
- [Model Support](#model-support)
|
||||
- [OpenAI Endpoint](#openai-endpoint-get-v1models)
|
||||
- [Claude Endpoint](#claude-endpoint-get-anthropicv1models)
|
||||
- [Gemini Endpoint](#gemini-endpoint)
|
||||
- [Quick Start](#quick-start)
|
||||
- [Option 1: Download Release Binaries](#option-1-download-release-binaries)
|
||||
- [Option 2: Docker / GHCR](#option-2-docker--ghcr)
|
||||
- [Option 3: Vercel](#option-3-vercel)
|
||||
- [Option 4: Local Run](#option-4-local-run)
|
||||
- [Configuration](#configuration)
|
||||
- [Authentication Modes](#authentication-modes)
|
||||
- [Concurrency Model](#concurrency-model)
|
||||
- [Tool Call Adaptation](#tool-call-adaptation)
|
||||
- [Local Dev Packet Capture](#local-dev-packet-capture)
|
||||
- [Documentation Index](#documentation-index)
|
||||
- [Testing](#testing)
|
||||
- [Release Artifact Automation (GitHub Actions)](#release-artifact-automation-github-actions)
|
||||
- [Disclaimer](#disclaimer)
|
||||
|
||||
## Architecture Overview (Summary)
|
||||
|
||||
```mermaid
|
||||
@@ -33,25 +59,28 @@ flowchart LR
|
||||
Client["🖥️ Clients / SDKs\n(OpenAI / Claude / Gemini)"]
|
||||
Upstream["☁️ DeepSeek API"]
|
||||
|
||||
subgraph DS2API["DS2API 3.x (Unified OpenAI Core)"]
|
||||
subgraph DS2API["DS2API 4.x (Modular HTTP Surface + PromptCompat Core)"]
|
||||
Router["chi Router + Middleware\n(RequestID / RealIP / Logger / Recoverer / CORS)"]
|
||||
|
||||
subgraph Adapters["Protocol Adapters"]
|
||||
OA["OpenAI\n/v1/*"]
|
||||
subgraph HTTP["HTTP API Surface"]
|
||||
OA["OpenAI\nchat / responses / files / embeddings"]
|
||||
CA["Claude\n/anthropic/* + /v1/messages"]
|
||||
GA["Gemini\n/v1beta/models/* + /v1/models/*"]
|
||||
Admin["Admin API\n/admin/*"]
|
||||
Admin["Admin API\nresource packages"]
|
||||
WebUI["WebUI\n/admin (static hosting)"]
|
||||
Vercel["Vercel Node Stream\n/v1/chat/completions"]
|
||||
end
|
||||
|
||||
subgraph Runtime["Runtime + Core Capabilities"]
|
||||
Bridge["CLIProxy Bridge\n(multi-protocol <-> OpenAI)"]
|
||||
OAEngine["OpenAI ChatCompletions\n(unified tools + stream semantics)"]
|
||||
Compat["PromptCompat\n(API -> web-chat plain text context)"]
|
||||
Completion["Completion Runtime\n(session / PoW / completion)"]
|
||||
Turn["AssistantTurn\n(output semantic normalization)"]
|
||||
Auth["Auth Resolver\n(API key / bearer / x-goog-api-key)"]
|
||||
Pool["Account Pool + Queue\n(in-flight slots + wait queue)"]
|
||||
DSClient["DeepSeek Client\n(session / auth / HTTP)"]
|
||||
Pow["PoW Solver\n(Pure Go ms-level)"]
|
||||
DSClient["DeepSeek Client\n(session / auth / completion / files)"]
|
||||
Pow["PoW Solver\n(Pure Go)"]
|
||||
Tool["Tool Sieve\n(Go/Node semantic parity)"]
|
||||
History["Current Input File\n(DS2API_HISTORY.txt)"]
|
||||
end
|
||||
end
|
||||
|
||||
@@ -59,19 +88,24 @@ flowchart LR
|
||||
Router --> OA & CA & GA
|
||||
Router --> Admin
|
||||
Router --> WebUI
|
||||
Router --> Vercel
|
||||
|
||||
OA --> OAEngine
|
||||
CA & GA --> Bridge
|
||||
Bridge --> OAEngine
|
||||
OAEngine --> Auth
|
||||
OAEngine -.account rotation.-> Pool
|
||||
OAEngine -.tool-call parsing.-> Tool
|
||||
OAEngine -.PoW solving.-> Pow
|
||||
OA --> Compat
|
||||
CA & GA --> Compat
|
||||
Compat --> Completion
|
||||
Completion -.full context.-> History
|
||||
Completion --> Turn
|
||||
Vercel -.Go prepare.-> Completion
|
||||
Vercel -.Node SSE.-> Tool
|
||||
Completion --> Auth
|
||||
Completion -.account rotation.-> Pool
|
||||
Completion -.tool-call parsing.-> Tool
|
||||
Completion -.PoW solving.-> Pow
|
||||
Auth --> DSClient
|
||||
DSClient --> Upstream
|
||||
Upstream --> DSClient
|
||||
OAEngine --> Bridge
|
||||
Bridge --> Client
|
||||
Turn --> Client
|
||||
Vercel --> Client
|
||||
```
|
||||
|
||||
For the full module-by-module architecture and directory responsibilities, see [docs/ARCHITECTURE.en.md](docs/ARCHITECTURE.en.md).
|
||||
@@ -84,9 +118,10 @@ For the full module-by-module architecture and directory responsibilities, see [
|
||||
|
||||
| Capability | Details |
|
||||
| --- | --- |
|
||||
| OpenAI compatible | `GET /v1/models`, `GET /v1/models/{id}`, `POST /v1/chat/completions`, `POST /v1/responses`, `GET /v1/responses/{response_id}`, `POST /v1/embeddings`, `POST /v1/files` |
|
||||
| OpenAI compatible | `GET /v1/models`, `GET /v1/models/{id}`, `POST /v1/chat/completions`, `POST /v1/responses`, `GET /v1/responses/{response_id}`, `POST /v1/embeddings`, `POST /v1/files`, `GET /v1/files/{file_id}` |
|
||||
| Claude compatible | `GET /anthropic/v1/models`, `POST /anthropic/v1/messages`, `POST /anthropic/v1/messages/count_tokens` (plus shortcut paths `/v1/messages`, `/messages`) |
|
||||
| Gemini compatible | `POST /v1beta/models/{model}:generateContent`, `POST /v1beta/models/{model}:streamGenerateContent` (plus `/v1/models/{model}:*` paths) |
|
||||
| Unified CORS compatibility | `/v1/*`, `/anthropic/*`, `/v1beta/models/*`, and `/admin/*` share one CORS policy; on Vercel, the Node Runtime for `/v1/chat/completions` mirrors the same relaxed preflight behavior for third-party clients |
|
||||
| Multi-account rotation | Auto token refresh, email/mobile dual login |
|
||||
| Concurrency control | Per-account in-flight limit + waiting queue, dynamic recommended concurrency |
|
||||
| DeepSeek PoW | Pure Go high-performance solver (DeepSeekHashV1), ms-level response |
|
||||
@@ -95,6 +130,8 @@ For the full module-by-module architecture and directory responsibilities, see [
|
||||
| WebUI Admin Panel | SPA at `/admin` (bilingual Chinese/English, dark mode, with server-side conversation history) |
|
||||
| Health Probes | `GET /healthz` (liveness), `GET /readyz` (readiness) |
|
||||
|
||||
OpenAI `/v1/*` routes remain canonical, and DS2API also accepts root shortcuts such as `/models`, `/chat/completions`, `/responses`, `/embeddings`, `/files`, and `/files/{file_id}` for clients configured with the bare service URL.
|
||||
|
||||
## Platform Compatibility Matrix
|
||||
|
||||
| Tier | Platform | Status |
|
||||
@@ -112,38 +149,32 @@ For the full module-by-module architecture and directory responsibilities, see [
|
||||
|
||||
| Family | Model ID | thinking | search |
|
||||
| --- | --- | --- | --- |
|
||||
| default | `deepseek-chat` | ❌ | ❌ |
|
||||
| default | `deepseek-reasoner` | ✅ | ❌ |
|
||||
| default | `deepseek-chat-search` | ❌ | ✅ |
|
||||
| default | `deepseek-reasoner-search` | ✅ | ✅ |
|
||||
| expert | `deepseek-expert-chat` | ❌ | ❌ |
|
||||
| expert | `deepseek-expert-reasoner` | ✅ | ❌ |
|
||||
| expert | `deepseek-expert-chat-search` | ❌ | ✅ |
|
||||
| expert | `deepseek-expert-reasoner-search` | ✅ | ✅ |
|
||||
| vision | `deepseek-vision-chat` | ❌ | ❌ |
|
||||
| vision | `deepseek-vision-reasoner` | ✅ | ❌ |
|
||||
| vision | `deepseek-vision-chat-search` | ❌ | ✅ |
|
||||
| vision | `deepseek-vision-reasoner-search` | ✅ | ✅ |
|
||||
| default | `deepseek-v4-flash` | enabled by default, request-controlled | ❌ |
|
||||
| expert | `deepseek-v4-pro` | enabled by default, request-controlled | ❌ |
|
||||
| default | `deepseek-v4-flash-search` | enabled by default, request-controlled | ✅ |
|
||||
| expert | `deepseek-v4-pro-search` | enabled by default, request-controlled | ✅ |
|
||||
| vision | `deepseek-v4-vision` | enabled by default, request-controlled | ❌ |
|
||||
|
||||
Besides native IDs, DS2API also accepts common aliases as input (for example `gpt-5`, `gpt-5-mini`, `gpt-5-codex`, `gpt-4.1`, `o3`, `claude-opus-4-6`, `claude-sonnet-4-5`, `gemini-2.5-pro`, `gemini-2.5-flash`), but `/v1/models` returns normalized DeepSeek native model IDs.
|
||||
Besides native IDs, DS2API also accepts common aliases as input (for example `gpt-4.1`, `gpt-5`, `gpt-5-codex`, `o3`, `claude-*`, `gemini-*`), but `/v1/models` returns normalized DeepSeek native model IDs. The complete alias behavior is documented in [API.en.md](API.en.md#model-alias-resolution) and `config.example.json`.
|
||||
Current upstream vision support exposes only the `vision` lane and does not provide a separate search-enabled vision variant.
|
||||
|
||||
### Claude Endpoint (`GET /anthropic/v1/models`)
|
||||
|
||||
| Current common model | Default Mapping |
|
||||
| --- | --- |
|
||||
| `claude-sonnet-4-5` | `deepseek-chat` |
|
||||
| `claude-haiku-4-5` (compatible with `claude-3-5-haiku-latest`) | `deepseek-chat` |
|
||||
| `claude-opus-4-6` | `deepseek-reasoner` |
|
||||
| `claude-sonnet-4-6` | `deepseek-v4-flash` |
|
||||
| `claude-haiku-4-5` (compatible with `claude-3-5-haiku-latest`) | `deepseek-v4-flash` |
|
||||
| `claude-opus-4-6` | `deepseek-v4-pro` |
|
||||
|
||||
Override mapping via `claude_mapping` or `claude_model_mapping` in config.
|
||||
Besides the current primary aliases above, `/anthropic/v1/models` also returns Claude 4.x snapshots plus historical 3.x / 2.x / 1.x IDs and common aliases for legacy client compatibility.
|
||||
Override mapping via the global `model_aliases` config.
|
||||
Besides the primary aliases above, `/anthropic/v1/models` also returns Claude 4.x snapshots plus historical 3.x IDs and common aliases for legacy client compatibility.
|
||||
|
||||
#### Claude Code integration pitfalls (validated)
|
||||
|
||||
- Set `ANTHROPIC_BASE_URL` to the DS2API root URL (for example `http://127.0.0.1:5001`). Claude Code sends requests to `/v1/messages?beta=true`.
|
||||
- `ANTHROPIC_API_KEY` must match an entry in `keys` from `config.json`. Keeping both a regular key and an `sk-ant-*` style key improves client compatibility.
|
||||
- If your environment has proxy variables, set `NO_PROXY=127.0.0.1,localhost,<your_host_ip>` for DS2API to avoid proxy interception of local traffic.
|
||||
- If tool calls are rendered as plain text and not executed, first verify the model output uses supported XML/Markup tool blocks (`<tool_call>` / `<function_call>` / `<invoke>` / `tool_use`) rather than standalone JSON `tool_calls`.
|
||||
- If tool calls are rendered as plain text and not executed, first verify the model output uses the recommended DSML block: `<|DSML|tool_calls><|DSML|invoke name="..."><|DSML|parameter name="...">...`. DS2API also accepts legacy canonical XML: `<tool_calls><invoke name="..."><parameter name="...">...`; legacy `<tools>` / `<tool_call>` / `<tool_name>` / `<param>`, `<function_call>`, `tool_use`, or standalone JSON `tool_calls` are not executed.
|
||||
|
||||
### Gemini Endpoint
|
||||
|
||||
@@ -206,6 +237,7 @@ docker-compose up -d
|
||||
```
|
||||
|
||||
The default `docker-compose.yml` uses `ghcr.io/cjackhwang/ds2api:latest` and maps host port `6011` to container port `5001`. If you want `5001` exposed directly, set `DS2API_HOST_PORT=5001` (or adjust the `ports` mapping).
|
||||
It also mounts `./config.json` to `/data/config.json` and sets `DS2API_CONFIG_PATH=/data/config.json` by default, which avoids runtime token persistence failures caused by read-only `/app`.
|
||||
|
||||
Rebuild after updates: `docker-compose up -d --build`
|
||||
|
||||
@@ -215,6 +247,10 @@ Rebuild after updates: `docker-compose up -d --build`
|
||||
2. After deployment, open `/admin` and login with `DS2API_ADMIN_KEY` shown in Zeabur env/template instructions.
|
||||
3. Import / edit config in Admin UI (it will be written and persisted to `/data/config.json`).
|
||||
|
||||
Fresh Zeabur volumes can start without `/data/config.json`; DS2API will boot with an empty file-backed config and create the file on the first Admin UI save.
|
||||
|
||||
For manual deployment without the template, create a Zeabur GitHub service, keep Root Directory as `/`, build with the repo-root `Dockerfile`, mount a persistent volume at `/data`, set `PORT=5001`, `DS2API_ADMIN_KEY=your-strong-secret`, and `DS2API_CONFIG_PATH=/data/config.json`, then expose HTTP port `5001`. See [docs/DEPLOY.en.md](docs/DEPLOY.en.md#manual-deployment-without-the-template) for the full guide.
|
||||
|
||||
Note: when Zeabur builds directly from the repo `Dockerfile`, you do not need to pass `BUILD_VERSION`. The image prefers that build arg when provided, and automatically falls back to the repo-root `VERSION` file when it is absent.
|
||||
|
||||
### Option 3: Vercel
|
||||
@@ -237,7 +273,7 @@ Recommended: convert `config.json` to Base64 locally, then paste into `DS2API_CO
|
||||
base64 < config.json | tr -d '\n'
|
||||
```
|
||||
|
||||
> **Streaming note**: `/v1/chat/completions` on Vercel is routed to `api/chat-stream.js` (Node Runtime) for real-time SSE. Auth, account selection, and session/PoW preparation are still handled by the Go internal prepare endpoint; streaming output (including `tools`) is assembled on Node with Go-aligned anti-leak handling.
|
||||
> **Streaming note**: `/v1/chat/completions` on Vercel is routed to `api/chat-stream.js` (Node Runtime) for real-time SSE. Auth, account selection, and session/PoW preparation are still handled by the Go internal prepare endpoint; streaming output (including `tools`) is assembled on Node with Go-aligned anti-leak handling. This is the only interface family currently routed through Node, and its CORS allow behavior is kept aligned with the Go router so third-party preflight handling stays unified.
|
||||
|
||||
For detailed deployment instructions, see the [Deployment Guide](docs/DEPLOY.en.md).
|
||||
|
||||
@@ -266,102 +302,19 @@ The server actually binds to `0.0.0.0:5001`, so devices on the same LAN can usua
|
||||
|
||||
## Configuration
|
||||
|
||||
### `config.json` Example
|
||||
`README` keeps only the onboarding path. Use [config.example.json](config.example.json) as the field template, and see the [deployment guide](docs/DEPLOY.en.md#0-prerequisites) plus [API configuration notes](API.en.md#configuration-best-practice) for full details.
|
||||
|
||||
```json
|
||||
{
|
||||
"keys": ["your-api-key-1", "your-api-key-2"],
|
||||
"accounts": [
|
||||
{
|
||||
"email": "user@example.com",
|
||||
"password": "your-password"
|
||||
},
|
||||
{
|
||||
"mobile": "12345678901",
|
||||
"password": "your-password"
|
||||
}
|
||||
],
|
||||
"model_aliases": {
|
||||
"gpt-4o": "deepseek-chat",
|
||||
"gpt-5": "deepseek-chat",
|
||||
"gpt-5-mini": "deepseek-chat",
|
||||
"gpt-5-codex": "deepseek-reasoner",
|
||||
"o3": "deepseek-reasoner",
|
||||
"claude-opus-4-6": "deepseek-reasoner",
|
||||
"gemini-2.5-flash": "deepseek-chat"
|
||||
},
|
||||
"compat": {
|
||||
"wide_input_strict_output": true,
|
||||
"strip_reference_markers": true
|
||||
},
|
||||
"responses": {
|
||||
"store_ttl_seconds": 900
|
||||
},
|
||||
"embeddings": {
|
||||
"provider": "deterministic"
|
||||
},
|
||||
"claude_mapping": {
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner"
|
||||
},
|
||||
"admin": {
|
||||
"jwt_expire_hours": 24
|
||||
},
|
||||
"runtime": {
|
||||
"account_max_inflight": 2,
|
||||
"account_max_queue": 0,
|
||||
"global_max_inflight": 0,
|
||||
"token_refresh_interval_hours": 6
|
||||
},
|
||||
"auto_delete": {
|
||||
"mode": "none"
|
||||
}
|
||||
}
|
||||
```
|
||||
Common fields:
|
||||
|
||||
- `keys`: API access keys; clients authenticate via `Authorization: Bearer <key>`
|
||||
- `accounts`: DeepSeek account list, supports `email` or `mobile` login
|
||||
- `token`: Even if set in `config.json`, it is cleared during load (DS2API does not read persisted tokens from config); runtime tokens are maintained/refreshed in memory only
|
||||
- `model_aliases`: Map common model names (GPT/Codex/Claude) to DeepSeek models
|
||||
- `compat.wide_input_strict_output`: Keep `true` (current default policy)
|
||||
- `compat.strip_reference_markers`: Keep `true`; it strips reference markers from visible output
|
||||
- `toolcall`: Legacy field; the current behavior is fixed to feature matching + high-confidence early emit, and any config value is ignored
|
||||
- `responses.store_ttl_seconds`: In-memory TTL for `/v1/responses/{id}`
|
||||
- `embeddings.provider`: Embeddings provider (`deterministic/mock/builtin` built-in)
|
||||
- `claude_mapping`: Maps `fast`/`slow` suffixes to corresponding DeepSeek models (still compatible with `claude_model_mapping`)
|
||||
- `admin`: Admin panel settings (JWT expiry, password hash, etc.), hot-reloadable via Admin Settings API
|
||||
- `runtime`: Runtime parameters (concurrency limits, queue sizes, managed token refresh interval), hot-reloadable via Admin Settings API; `account_max_queue=0`/`global_max_inflight=0` means auto-calculate from recommended values, `token_refresh_interval_hours=6` is the default forced re-login interval
|
||||
- `auto_delete.mode`: How to clean up DeepSeek remote chat records after each request completes. Supported values: `none` (default, no deletion), `single` (delete only the current session), `all` (delete all sessions); legacy `auto_delete.sessions=true` is still treated as `all`
|
||||
- `keys` / `api_keys`: client API keys; `api_keys` adds `name` and `remark` metadata while `keys` remains compatible.
|
||||
- `accounts`: managed DeepSeek accounts, supporting `email` or `mobile` login plus proxy/name/remark metadata.
|
||||
- `model_aliases`: one shared alias map for OpenAI / Claude / Gemini model names.
|
||||
- `runtime`: account concurrency, queueing, and token refresh behavior, hot-reloadable via Admin Settings.
|
||||
- `auto_delete.mode`: remote session cleanup after each request, supporting `none` / `single` / `all`.
|
||||
- `current_input_file`: the global context split/upload mode; it is enabled by default and uploads the full context as a `DS2API_HISTORY.txt` context file once the character threshold is reached.
|
||||
- If you turn off `current_input_file`, requests pass through directly without uploading any split context file.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Purpose | Default |
|
||||
| --- | --- | --- |
|
||||
| `PORT` | Service port | `5001` |
|
||||
| `LOG_LEVEL` | Log level | `INFO` (`DEBUG`/`WARN`/`ERROR`) |
|
||||
| `DS2API_ADMIN_KEY` | Admin login key | `admin` |
|
||||
| `DS2API_JWT_SECRET` | Admin JWT signing secret | Same as `DS2API_ADMIN_KEY` |
|
||||
| `DS2API_JWT_EXPIRE_HOURS` | Admin JWT TTL in hours | `24` |
|
||||
| `DS2API_CONFIG_PATH` | Config file path | `config.json` |
|
||||
| `DS2API_CONFIG_JSON` | Inline config (JSON or Base64) | — |
|
||||
| `DS2API_CHAT_HISTORY_PATH` | Server-side conversation history file path | `data/chat_history.json` |
|
||||
| `DS2API_ENV_WRITEBACK` | Auto-write env-backed config to file and transition to file mode (`1/true/yes/on`) | Disabled |
|
||||
| `DS2API_STATIC_ADMIN_DIR` | Admin static assets dir | `static/admin` |
|
||||
| `DS2API_AUTO_BUILD_WEBUI` | Auto-build WebUI on startup | Enabled locally, disabled on Vercel |
|
||||
| `DS2API_ACCOUNT_MAX_INFLIGHT` | Max in-flight requests per account | `2` |
|
||||
| `DS2API_ACCOUNT_MAX_QUEUE` | Waiting queue limit | `recommended_concurrency` |
|
||||
| `DS2API_GLOBAL_MAX_INFLIGHT` | Global max in-flight requests | `recommended_concurrency` |
|
||||
| `DS2API_VERCEL_INTERNAL_SECRET` | Vercel hybrid streaming internal auth | Falls back to `DS2API_ADMIN_KEY` |
|
||||
| `DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS` | Stream lease TTL seconds | `900` |
|
||||
| `DS2API_DEV_PACKET_CAPTURE` | Local dev packet capture switch (record recent request/response bodies) | Enabled by default on non-Vercel local runtime |
|
||||
| `DS2API_DEV_PACKET_CAPTURE_LIMIT` | Number of captured sessions to retain (auto-evict overflow) | `20` |
|
||||
| `DS2API_DEV_PACKET_CAPTURE_MAX_BODY_BYTES` | Max recorded bytes per captured response body | `5242880` |
|
||||
| `VERCEL_TOKEN` | Vercel sync token | — |
|
||||
| `VERCEL_PROJECT_ID` | Vercel project ID | — |
|
||||
| `VERCEL_TEAM_ID` | Vercel team ID | — |
|
||||
| `DS2API_VERCEL_PROTECTION_BYPASS` | Vercel deployment protection bypass for internal Node→Go calls | — |
|
||||
|
||||
> Note: when `DS2API_CONFIG_JSON` is detected, the Admin UI shows mode risk and auto-persistence status (including `DS2API_CONFIG_PATH` and mode-transition hints).
|
||||
For the full environment variable list, see [docs/DEPLOY.en.md](docs/DEPLOY.en.md). For auth behavior, see [API.en.md](API.en.md#authentication).
|
||||
|
||||
## Authentication Modes
|
||||
|
||||
@@ -393,7 +346,7 @@ Queue limit = DS2API_ACCOUNT_MAX_QUEUE (default = recommended concurrency)
|
||||
When `tools` is present in the request, DS2API performs anti-leak handling:
|
||||
|
||||
1. Toolcall feature matching is enabled only in **non-code-block context** (fenced examples are ignored)
|
||||
2. The parser currently targets XML/Markup-family tool syntax (`<tool_call>` / `<function_call>` / `<invoke>` / `tool_use` / antml variants); standalone JSON `tool_calls` payloads are not treated as executable calls by default
|
||||
2. The parser now treats the DSML shell as the recommended executable tool-calling syntax: `<|DSML|tool_calls>` → `<|DSML|invoke name="...">` → `<|DSML|parameter name="...">`; it also accepts legacy canonical XML `<tool_calls>` → `<invoke name="...">` → `<parameter name="...">`. DSML is a shell alias and internal parsing remains XML-based; legacy `<tools>` / `<tool_call>` / `<tool_name>` / `<param>`, `<function_call>`, `tool_use`, antml variants, and standalone JSON `tool_calls` payloads are treated as plain text
|
||||
3. `responses` streaming strictly uses official item lifecycle events (`response.output_item.*`, `response.content_part.*`, `response.function_call_arguments.*`)
|
||||
4. `responses` supports and enforces `tool_choice` (`auto`/`none`/`required`/forced function); `required` violations return `422` for non-stream and `response.failed` for stream
|
||||
5. The output protocol follows the client request (OpenAI / Claude / Gemini native shapes); model-side prompting can prefer XML, and the compatibility layer handles the protocol-specific translation
|
||||
@@ -442,38 +395,29 @@ The save endpoint can target a chain by `query`, `chain_key`, or `capture_id`. E
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
# Unit tests (Go + Node)
|
||||
./tests/scripts/run-unit-all.sh
|
||||
For the full testing guide, see [docs/TESTING.md](docs/TESTING.md).
|
||||
|
||||
# One-command live end-to-end tests (real accounts, full request/response logs)
|
||||
./tests/scripts/run-live.sh
|
||||
|
||||
# Or with custom flags
|
||||
go run ./cmd/ds2api-tests \
|
||||
--config config.json \
|
||||
--admin-key admin \
|
||||
--out artifacts/testsuite \
|
||||
--timeout 120 \
|
||||
--retries 2
|
||||
```
|
||||
Quick commands:
|
||||
|
||||
```bash
|
||||
# Release-blocking gates
|
||||
./tests/scripts/check-stage6-manual-smoke.sh
|
||||
# Local PR gates
|
||||
./scripts/lint.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
npm ci --prefix webui && npm run build --prefix webui
|
||||
npm run build --prefix webui
|
||||
|
||||
# Live end-to-end tests (real accounts, full request/response logs)
|
||||
./tests/scripts/run-live.sh
|
||||
```
|
||||
|
||||
## Release Artifact Automation (GitHub Actions)
|
||||
|
||||
Workflow: `.github/workflows/release-artifacts.yml`
|
||||
|
||||
- **Trigger**: only on GitHub Release `published` (normal pushes do not trigger builds)
|
||||
- **Outputs**: multi-platform archives (`linux/amd64`, `linux/arm64`, `darwin/amd64`, `darwin/arm64`, `windows/amd64`) + `sha256sums.txt`
|
||||
- **Trigger**: by default only on GitHub Release `published`; you can also run it manually via `workflow_dispatch` and pass `release_tag` to rerun / backfill
|
||||
- **Outputs**: multi-platform binary archives (`linux/amd64`, `linux/arm64`, `linux/armv7`, `darwin/amd64`, `darwin/arm64`, `windows/amd64`, `windows/arm64`), Linux Docker image export tarballs, and `sha256sums.txt`
|
||||
- **Container publishing**: GHCR only (`ghcr.io/cjackhwang/ds2api`)
|
||||
- **Each archive includes**: `ds2api` executable, `static/admin`, WASM file (with embedded fallback support), `config.example.json`-based config template, README, LICENSE
|
||||
- **Each binary archive includes**: the `ds2api` executable, `static/admin`, `config.example.json`, `.env.example`, `README.MD`, `README.en.md`, and `LICENSE`
|
||||
|
||||
## Disclaimer
|
||||
|
||||
|
||||
65
SECURITY.md
Normal file
65
SECURITY.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
**Only the latest version** receives security updates.
|
||||
If you are using an older version, please upgrade to the latest release.
|
||||
|
||||
| Version | Supported |
|
||||
| -------------- | ------------------ |
|
||||
| latest | :white_check_mark: |
|
||||
| < latest | :x: |
|
||||
|
||||
> **Why?** This project is maintained by a single developer. Keeping only one active version ensures fast response times and avoids legacy maintenance overhead.
|
||||
|
||||
## What is a Security Vulnerability?
|
||||
|
||||
A **security vulnerability** is a bug that can be exploited to compromise:
|
||||
- Data confidentiality (e.g., leaking secrets, user data)
|
||||
- Data integrity (e.g., unauthorized modification)
|
||||
- System availability (e.g., remote crash, denial of service)
|
||||
- Privilege escalation (e.g., normal user gains admin rights)
|
||||
|
||||
**Examples**: SQL injection, command injection, path traversal, authentication bypass, insecure deserialization, sensitive data exposure.
|
||||
|
||||
**What is NOT a security vulnerability?**
|
||||
Regular bugs like crashes (without exploit potential), incorrect return values, performance issues, missing features, or documentation typos. Please report those via **GitHub Issues** publicly.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you believe you have found a security vulnerability, **please do NOT open a public issue**.
|
||||
|
||||
Instead, send an email to: **cjackhwang@qq.com**
|
||||
|
||||
Please include as much as possible:
|
||||
- A clear description of the issue
|
||||
- Steps to reproduce (code / input / environment)
|
||||
- Potential impact (what could an attacker do?)
|
||||
- Suggested fix (if any)
|
||||
|
||||
You can expect:
|
||||
- **Initial response** within 3 business days (acknowledgment)
|
||||
- **Confirmation or clarification** within 7 days
|
||||
- **Fix or decision** within 14 days (depending on complexity)
|
||||
|
||||
## What to Expect After Reporting
|
||||
|
||||
| Outcome | What happens |
|
||||
| ------------------ | ------------- |
|
||||
| **Accepted** | I will develop a fix, release a patch version, and may credit you in the release notes (unless you prefer anonymity). |
|
||||
| **Declined** | I will explain why (e.g., not a security issue, already fixed, out of scope, or requires a larger redesign). |
|
||||
| **Need more info** | I will ask follow-up questions. If no response within 14 days, the report may be considered stale. |
|
||||
|
||||
## Disclosure Policy
|
||||
|
||||
- Vulnerabilities will be **fixed privately** and then released as a new version.
|
||||
- After the fix is released, I will typically publish a short security advisory (via GitHub Security Advisories) without revealing exploit details.
|
||||
- Public disclosure can be coordinated if you request it.
|
||||
|
||||
## Recognition
|
||||
|
||||
I appreciate security researchers who follow responsible disclosure. Contributors who report valid, previously unknown vulnerabilities may be acknowledged in the project's README or release notes (unless they prefer to stay anonymous).
|
||||
|
||||
---
|
||||
|
||||
*Thank you for helping keep this project safe!*
|
||||
@@ -35,8 +35,9 @@ func main() {
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: "0.0.0.0:" + port,
|
||||
Handler: app.Router,
|
||||
Addr: "0.0.0.0:" + port,
|
||||
Handler: app.Router,
|
||||
ReadHeaderTimeout: 5 * time.Second,
|
||||
}
|
||||
localURL := fmt.Sprintf("http://127.0.0.1:%s", port)
|
||||
lanIP := detectLANIPv4()
|
||||
|
||||
@@ -38,28 +38,25 @@
|
||||
}
|
||||
],
|
||||
"model_aliases": {
|
||||
"gpt-4o": "deepseek-chat",
|
||||
"gpt-5-codex": "deepseek-reasoner",
|
||||
"o3": "deepseek-reasoner"
|
||||
},
|
||||
"compat": {
|
||||
"wide_input_strict_output": true,
|
||||
"strip_reference_markers": true
|
||||
"gpt-4o": "deepseek-v4-flash",
|
||||
"gpt-5.5": "deepseek-v4-flash",
|
||||
"gpt-5.3-codex": "deepseek-v4-pro",
|
||||
"o3": "deepseek-v4-pro"
|
||||
},
|
||||
"responses": {
|
||||
"store_ttl_seconds": 900
|
||||
},
|
||||
"history_split": {
|
||||
"current_input_file": {
|
||||
"enabled": true,
|
||||
"trigger_after_turns": 1
|
||||
"min_chars": 0
|
||||
},
|
||||
"thinking_injection": {
|
||||
"enabled": true,
|
||||
"prompt": ""
|
||||
},
|
||||
"embeddings": {
|
||||
"provider": "deterministic"
|
||||
},
|
||||
"claude_mapping": {
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner"
|
||||
},
|
||||
"admin": {
|
||||
"jwt_expire_hours": 24
|
||||
},
|
||||
|
||||
@@ -9,8 +9,9 @@ services:
|
||||
# Host port is configurable via DS2API_HOST_PORT; container port stays fixed at 5001.
|
||||
- "${DS2API_HOST_PORT:-6011}:5001"
|
||||
volumes:
|
||||
- ./config.json:/app/config.json # 配置文件
|
||||
- ./config.json:/data/config.json # 配置文件(持久化推荐路径)
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
- LOG_LEVEL=INFO
|
||||
- DS2API_ADMIN_KEY=${DS2API_ADMIN_KEY:-ds2api}
|
||||
- DS2API_CONFIG_PATH=/data/config.json
|
||||
|
||||
@@ -4,9 +4,9 @@ Language: [中文](ARCHITECTURE.md) | [English](ARCHITECTURE.en.md)
|
||||
|
||||
> This file is the single architecture source for directory layout, module boundaries, and execution flow.
|
||||
|
||||
## 1. Top-level Layout (expanded)
|
||||
## 1. Top-level Layout (core directories)
|
||||
|
||||
> Notes: this is the **fully expanded** project directory list (excluding metadata/dependency dirs such as `.git/` and `webui/node_modules/`), with each folder annotated by purpose.
|
||||
> Notes: this lists the main business directories (excluding metadata/dependency dirs such as `.git/` and `webui/node_modules/`), with each folder annotated by purpose. Newly added directories should be verified from the code tree rather than treated as a per-file inventory here.
|
||||
|
||||
```text
|
||||
ds2api/
|
||||
@@ -15,47 +15,64 @@ ds2api/
|
||||
│ └── workflows/ # GitHub Actions workflows
|
||||
├── api/ # Serverless entrypoints (Vercel Go/Node)
|
||||
├── app/ # Application-level handler assembly
|
||||
├── artifacts/ # Debug artifacts (raw-stream-sim, stream-debug, etc.)
|
||||
├── cmd/ # Executable entrypoints
|
||||
│ ├── ds2api/ # Main service bootstrap
|
||||
│ └── ds2api-tests/ # E2E testsuite CLI bootstrap
|
||||
├── docs/ # Project documentation
|
||||
├── internal/ # Core implementation (non-public packages)
|
||||
│ ├── account/ # Account pool, inflight slots, waiting queue
|
||||
│ ├── adapter/ # Multi-protocol adapters
|
||||
│ │ ├── claude/ # Claude protocol adapter
|
||||
│ │ ├── gemini/ # Gemini protocol adapter
|
||||
│ │ └── openai/ # OpenAI adapter and shared execution core
|
||||
│ ├── admin/ # Admin API (config/accounts/ops)
|
||||
│ ├── auth/ # Auth/JWT/credential resolution
|
||||
│ ├── chathistory/ # Server-side conversation history storage/query
|
||||
│ ├── claudeconv/ # Claude message conversion helpers
|
||||
│ ├── compat/ # Compatibility and regression helpers
|
||||
│ ├── assistantturn/ # Upstream output to canonical assistant turn / stream event semantics
|
||||
│ ├── completionruntime/ # Shared Go DeepSeek completion startup, non-stream collection, and retry
|
||||
│ ├── config/ # Config loading/validation/hot reload
|
||||
│ ├── deepseek/ # DeepSeek upstream client capabilities
|
||||
│ ├── deepseek/ # DeepSeek upstream client/protocol/transport
|
||||
│ │ ├── client/ # Login/session/completion/upload/delete calls
|
||||
│ │ ├── protocol/ # DeepSeek URLs, constants, skip path/pattern
|
||||
│ │ └── transport/ # DeepSeek transport details
|
||||
│ ├── devcapture/ # Dev capture and troubleshooting
|
||||
│ ├── format/ # Response formatting layer
|
||||
│ │ ├── claude/ # Claude output formatting
|
||||
│ │ └── openai/ # OpenAI output formatting
|
||||
│ ├── httpapi/ # HTTP surfaces: OpenAI/Claude/Gemini/Admin
|
||||
│ │ ├── admin/ # Admin API root assembly and resource packages
|
||||
│ │ ├── claude/ # Claude HTTP protocol adapter
|
||||
│ │ ├── gemini/ # Gemini HTTP protocol adapter
|
||||
│ │ ├── openai/ # OpenAI HTTP surface
|
||||
│ │ │ ├── chat/ # Chat Completions execution entrypoint
|
||||
│ │ │ ├── responses/ # Responses API and response store
|
||||
│ │ │ ├── files/ # Files API and inline-file preprocessing
|
||||
│ │ │ ├── embeddings/ # Embeddings API
|
||||
│ │ │ ├── history/ # OpenAI context file handling
|
||||
│ │ │ └── shared/ # OpenAI HTTP errors/models/tool formatting
|
||||
│ │ └── requestbody/ # HTTP body reading and UTF-8/JSON validation helpers
|
||||
│ ├── js/ # Node runtime related logic
|
||||
│ │ ├── chat-stream/ # Node streaming bridge
|
||||
│ │ ├── helpers/ # JS helper modules
|
||||
│ │ │ └── stream-tool-sieve/ # JS implementation of tool sieve
|
||||
│ │ └── shared/ # Shared semantics between Go/Node
|
||||
│ ├── prompt/ # Prompt composition
|
||||
│ ├── promptcompat/ # API request -> DeepSeek web-chat plain-text compatibility
|
||||
│ ├── rawsample/ # Raw sample read/write and management
|
||||
│ ├── server/ # Router and middleware assembly
|
||||
│ │ └── data/ # Router/runtime helper data
|
||||
│ ├── sse/ # SSE parsing utilities
|
||||
│ ├── stream/ # Unified stream consumption engine
|
||||
│ ├── testsuite/ # Testsuite execution framework
|
||||
│ ├── textclean/ # Text cleanup
|
||||
│ ├── toolcall/ # Tool-call parsing and repair
|
||||
│ ├── translatorcliproxy/ # Cross-protocol translation bridge
|
||||
│ ├── toolstream/ # Go streaming tool-call anti-leak and delta detection
|
||||
│ ├── translatorcliproxy/ # Vercel/fallback/test protocol translation bridge
|
||||
│ ├── util/ # Shared utility helpers
|
||||
│ ├── version/ # Version query/compare
|
||||
│ └── webui/ # WebUI static hosting logic
|
||||
├── plans/ # Stage plans and manual QA records
|
||||
├── pow/ # PoW standalone implementation + benchmarks
|
||||
├── scripts/ # Build/release helper scripts
|
||||
├── static/ # Build artifacts (admin static resources)
|
||||
├── tests/ # Test assets and scripts
|
||||
│ ├── compat/ # Compatibility fixtures + expected outputs
|
||||
│ │ ├── expected/ # Expected output samples
|
||||
@@ -64,9 +81,9 @@ ds2api/
|
||||
│ │ └── toolcalls/ # Tool-call fixtures
|
||||
│ ├── node/ # Node unit tests
|
||||
│ ├── raw_stream_samples/ # Upstream raw SSE samples
|
||||
│ │ ├── content-filter-trigger-20260405-jwt3/ # Content-filter terminal sample
|
||||
│ │ ├── continue-thinking-snapshot-replay-20260405/ # Continue-thinking sample
|
||||
│ │ ├── guangzhou-weather-reasoner-search-20260404/ # Search/reference sample
|
||||
│ │ ├── longtext-deepseek-v4-flash-20260429/ # Flash long-text/file-upload sample
|
||||
│ │ ├── longtext-deepseek-v4-pro-20260429/ # Pro long-text/file-upload sample
|
||||
│ │ ├── markdown-format-example-20260405/ # Markdown sample
|
||||
│ │ └── markdown-format-example-20260405-spacefix/ # Space-fix sample
|
||||
│ ├── scripts/ # Test entry scripts
|
||||
@@ -79,6 +96,8 @@ ds2api/
|
||||
├── features/ # Feature modules
|
||||
│ ├── account/ # Account management page
|
||||
│ ├── apiTester/ # API tester page
|
||||
│ ├── chatHistory/ # Server-side conversation history page
|
||||
│ ├── proxy/ # Proxy management page
|
||||
│ ├── settings/ # Settings page
|
||||
│ └── vercel/ # Vercel sync page
|
||||
├── layout/ # Layout components
|
||||
@@ -90,36 +109,105 @@ ds2api/
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
C[Client/SDK] --> R[internal/server/router.go]
|
||||
R --> OA[OpenAI Adapter]
|
||||
R --> CA[Claude Adapter]
|
||||
R --> GA[Gemini Adapter]
|
||||
R --> AD[Admin API]
|
||||
C[Client / SDK] --> R[internal/server/router.go]
|
||||
|
||||
CA --> BR[translatorcliproxy]
|
||||
GA --> BR
|
||||
BR --> CORE[internal/adapter/openai ChatCompletions]
|
||||
OA --> CORE
|
||||
subgraph HTTP[HTTP API surface]
|
||||
OA[internal/httpapi/openai]
|
||||
CHAT[openai/chat]
|
||||
RESP[openai/responses]
|
||||
FILES[openai/files + embeddings]
|
||||
CA[internal/httpapi/claude]
|
||||
GA[internal/httpapi/gemini]
|
||||
AD[internal/httpapi/admin/*]
|
||||
WEB[internal/webui static admin]
|
||||
end
|
||||
|
||||
CORE --> AUTH[internal/auth + config key/account resolver]
|
||||
CORE --> POOL[internal/account queue + concurrency]
|
||||
CORE --> TOOL[internal/toolcall parser + sieve]
|
||||
CORE --> DS[internal/deepseek client]
|
||||
subgraph COMPAT[Prompt compatibility core]
|
||||
PC[internal/promptcompat]
|
||||
PROMPT[internal/prompt]
|
||||
HIST[internal/httpapi/openai/history]
|
||||
end
|
||||
|
||||
subgraph RUNTIME[Shared runtime]
|
||||
AUTH[internal/auth]
|
||||
POOL[internal/account queue + concurrency]
|
||||
CR[internal/completionruntime]
|
||||
TURN[internal/assistantturn]
|
||||
STREAM[internal/stream + internal/sse]
|
||||
TOOL[internal/toolcall + internal/toolstream]
|
||||
FMT[internal/format/openai + claude]
|
||||
DS[internal/deepseek/client]
|
||||
POW[pow + internal/deepseek/protocol]
|
||||
end
|
||||
|
||||
subgraph NODE[Vercel Node Runtime]
|
||||
NCS[api/chat-stream.js]
|
||||
JS[internal/js/chat-stream + stream-tool-sieve]
|
||||
end
|
||||
|
||||
R --> OA --> CHAT
|
||||
OA --> RESP
|
||||
OA --> FILES
|
||||
R --> CA
|
||||
R --> GA
|
||||
R --> AD
|
||||
R --> WEB
|
||||
R -.Vercel stream.-> NCS
|
||||
|
||||
CA --> PC
|
||||
GA --> PC
|
||||
CHAT --> PC
|
||||
RESP --> PC
|
||||
PC --> PROMPT
|
||||
PC -.long history.-> HIST
|
||||
PC --> AUTH
|
||||
PC --> CR
|
||||
|
||||
NCS -.Go prepare/release.-> CHAT
|
||||
NCS --> JS
|
||||
JS --> TOOL
|
||||
|
||||
AUTH --> POOL
|
||||
CHAT --> CR
|
||||
RESP --> CR
|
||||
CA --> CR
|
||||
GA --> CR
|
||||
CR --> DS
|
||||
CR --> STREAM
|
||||
CR --> TURN
|
||||
STREAM --> TURN
|
||||
STREAM --> TOOL
|
||||
TURN --> FMT
|
||||
POOL --> CR
|
||||
DS --> POW
|
||||
DS --> U[DeepSeek upstream]
|
||||
```
|
||||
|
||||
## 3. Responsibilities in `internal/`
|
||||
|
||||
- `internal/server`: router tree + middlewares (health, protocol routes, Admin/WebUI).
|
||||
- `internal/adapter/openai`: shared execution core (chat/responses/embeddings + tool semantics).
|
||||
- `internal/adapter/{claude,gemini}`: protocol wrappers only (no duplicated upstream execution).
|
||||
- `internal/translatorcliproxy`: structure translation between Claude/Gemini and OpenAI.
|
||||
- `internal/deepseek`: upstream request/session/PoW/SSE handling.
|
||||
- `internal/stream` + `internal/sse`: stream parsing and incremental assembly.
|
||||
- `internal/toolcall`: XML/Markup-family tool-call parsing + anti-leak sieve (`<tool_call>` / `<function_call>` / `<invoke>` / `tool_use` / antml variants).
|
||||
- `internal/admin`: config/accounts/vercel sync/version/dev-capture endpoints.
|
||||
- `internal/httpapi/openai/*`: OpenAI HTTP surface split into chat, responses, files, embeddings, history, and shared packages; chat/responses share the promptcompat, stream, and toolcall semantics.
|
||||
- `internal/httpapi/{claude,gemini}`: protocol adapters that normalize into the same prompt compatibility semantics; normal direct paths must share DeepSeek session/PoW/completion execution through `completionruntime`, while `translatorcliproxy` is reserved for Vercel prepare/release, missing-backend fallback, and regression tests.
|
||||
- `internal/httpapi/requestbody`: shared HTTP body reading, JSON pre-validation, and UTF-8 error helpers across protocol adapters.
|
||||
- `internal/promptcompat`: compatibility core for turning OpenAI/Claude/Gemini requests into DeepSeek web-chat plain-text context.
|
||||
- `internal/assistantturn`: Go output-side canonical semantics, converting DeepSeek SSE collection results and stream finalization state into assistant turns and centralizing thinking, tool call, citation, usage, stop/error behavior.
|
||||
- `internal/completionruntime`: shared Go completion execution helpers for DeepSeek session/PoW/call startup, non-stream collection, and empty-output retry; streaming paths use it to start upstream requests, continue to use `internal/stream` for real-time consumption, and use `assistantturn` during finalization.
|
||||
- `internal/translatorcliproxy`: bridge compatibility layer for Claude/Gemini and OpenAI shape translation; it is not the main business protocol conversion center.
|
||||
- `internal/deepseek/{client,protocol,transport}`: upstream requests, sessions, PoW adaptation, protocol constants, and transport details.
|
||||
- `internal/js/chat-stream` + `api/chat-stream.js`: Vercel Node streaming bridge; Go prepare/release owns auth, account lease, and completion payload assembly, while Node relays real-time SSE with Go-aligned finalization and tool sieve semantics.
|
||||
- `internal/stream` + `internal/sse`: Go stream parsing and incremental assembly.
|
||||
- `internal/toolcall` + `internal/toolstream`: DSML shell compatibility plus canonical XML tool-call parsing and anti-leak sieve; DSML is normalized back to XML at the entrypoint, and internal parsing remains XML-based.
|
||||
- `internal/httpapi/admin/*`: Admin API root assembly plus auth/accounts/config/settings/proxies/rawsamples/vercel/history/devcapture/version resource packages.
|
||||
- `internal/chathistory`: server-side conversation history persistence, pagination, detail lookup, and retention policy.
|
||||
- `internal/config`: config loading/validation + runtime settings hot-reload.
|
||||
- `internal/account`: managed account pool, inflight slots, waiting queue.
|
||||
- `internal/textclean`: text cleanup helpers, e.g. stripping `[reference: N]` markers.
|
||||
- `internal/claudeconv`: Claude API request to DeepSeek format conversion.
|
||||
- `internal/compat`: compatibility regression tests using SSE fixtures to verify output consistency.
|
||||
- `internal/rawsample`: upstream raw response capture, read/write, and management.
|
||||
- `internal/devcapture`: developer debug capture, storing HTTP request/response for troubleshooting.
|
||||
- `internal/util`: cross-package utilities including JSON writing, type conversion, token counting, thinking parsing, etc.
|
||||
- `internal/version`: version query and comparison, supporting build-time injection and runtime resolution.
|
||||
|
||||
## 4. WebUI Runtime Relation
|
||||
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
|
||||
> 本文档用于集中维护“代码目录结构 + 模块边界 + 主链路调用关系”。
|
||||
|
||||
## 1. 顶层目录结构(展开)
|
||||
## 1. 顶层目录结构(核心目录)
|
||||
|
||||
> 说明:以下为仓库内业务相关目录的**完整展开**(排除 `.git/` 与 `webui/node_modules/` 这类依赖/元数据目录),并标注每个文件夹作用。
|
||||
> 说明:以下为仓库内主要业务目录(排除 `.git/` 与 `webui/node_modules/` 这类依赖/元数据目录),并标注每个文件夹作用。新增目录以代码为准,不要求在本文做逐文件展开。
|
||||
|
||||
```text
|
||||
ds2api/
|
||||
@@ -15,47 +15,64 @@ ds2api/
|
||||
│ └── workflows/ # GitHub Actions 工作流
|
||||
├── api/ # Serverless 入口(Vercel Go/Node)
|
||||
├── app/ # 应用级 handler 装配层
|
||||
├── artifacts/ # 调试产物(raw-stream-sim, stream-debug 等)
|
||||
├── cmd/ # 可执行程序入口
|
||||
│ ├── ds2api/ # 主服务启动入口
|
||||
│ └── ds2api-tests/ # E2E 测试集 CLI 入口
|
||||
├── docs/ # 项目文档目录
|
||||
├── internal/ # 核心业务实现(不对外暴露)
|
||||
│ ├── account/ # 账号池、并发槽位、等待队列
|
||||
│ ├── adapter/ # 多协议适配层
|
||||
│ │ ├── claude/ # Claude 协议适配
|
||||
│ │ ├── gemini/ # Gemini 协议适配
|
||||
│ │ └── openai/ # OpenAI 协议与统一执行核心
|
||||
│ ├── admin/ # Admin API(配置/账号/运维)
|
||||
│ ├── auth/ # 鉴权/JWT/凭证解析
|
||||
│ ├── chathistory/ # 服务器端对话记录存储与查询
|
||||
│ ├── claudeconv/ # Claude 消息格式转换工具
|
||||
│ ├── compat/ # 兼容性辅助与回归支持
|
||||
│ ├── assistantturn/ # 上游输出到统一 assistant turn / stream event 的语义层
|
||||
│ ├── completionruntime/ # Go 主路径共享 DeepSeek completion 启动、非流式收集与 retry
|
||||
│ ├── config/ # 配置加载、校验、热更新
|
||||
│ ├── deepseek/ # DeepSeek 上游客户端能力
|
||||
│ ├── deepseek/ # DeepSeek 上游 client/protocol/transport
|
||||
│ │ ├── client/ # 登录、会话、completion、上传/删除等上游调用
|
||||
│ │ ├── protocol/ # DeepSeek URL、常量、skip path/pattern
|
||||
│ │ └── transport/ # DeepSeek 传输层细节
|
||||
│ ├── devcapture/ # 开发抓包与调试采集
|
||||
│ ├── format/ # 响应格式化层
|
||||
│ │ ├── claude/ # Claude 输出格式化
|
||||
│ │ └── openai/ # OpenAI 输出格式化
|
||||
│ ├── httpapi/ # HTTP surface:OpenAI/Claude/Gemini/Admin
|
||||
│ │ ├── admin/ # Admin API 根装配与资源子包
|
||||
│ │ ├── claude/ # Claude HTTP 协议适配
|
||||
│ │ ├── gemini/ # Gemini HTTP 协议适配
|
||||
│ │ ├── openai/ # OpenAI HTTP surface
|
||||
│ │ │ ├── chat/ # Chat Completions 执行入口
|
||||
│ │ │ ├── responses/ # Responses API 与 response store
|
||||
│ │ │ ├── files/ # Files API 与 inline file 预处理
|
||||
│ │ │ ├── embeddings/ # Embeddings API
|
||||
│ │ │ ├── history/ # OpenAI context file handling
|
||||
│ │ │ └── shared/ # OpenAI HTTP 公共错误/模型/工具格式
|
||||
│ │ └── requestbody/ # HTTP 请求体读取与 UTF-8/JSON 校验辅助
|
||||
│ ├── js/ # Node Runtime 相关逻辑
|
||||
│ │ ├── chat-stream/ # Node 流式输出桥接
|
||||
│ │ ├── helpers/ # JS 辅助函数
|
||||
│ │ │ └── stream-tool-sieve/ # Tool sieve JS 实现
|
||||
│ │ └── shared/ # Go/Node 共用语义片段
|
||||
│ ├── prompt/ # Prompt 组装
|
||||
│ ├── promptcompat/ # API 请求到 DeepSeek 网页纯文本上下文兼容层
|
||||
│ ├── rawsample/ # raw sample 读写与管理
|
||||
│ ├── server/ # 路由与中间件装配
|
||||
│ │ └── data/ # 路由/运行时辅助数据
|
||||
│ ├── sse/ # SSE 解析工具
|
||||
│ ├── stream/ # 统一流式消费引擎
|
||||
│ ├── testsuite/ # 测试集执行框架
|
||||
│ ├── textclean/ # 文本清洗
|
||||
│ ├── toolcall/ # 工具调用解析与修复
|
||||
│ ├── translatorcliproxy/ # 多协议互转桥
|
||||
│ ├── toolstream/ # Go 流式 tool call 防泄漏与增量检测
|
||||
│ ├── translatorcliproxy/ # Vercel/fallback/测试用协议互转桥
|
||||
│ ├── util/ # 通用工具函数
|
||||
│ ├── version/ # 版本查询/比较
|
||||
│ └── webui/ # WebUI 静态托管相关逻辑
|
||||
├── plans/ # 阶段计划与人工验收记录
|
||||
├── pow/ # PoW 独立实现与基准
|
||||
├── scripts/ # 构建/发布/辅助脚本
|
||||
├── static/ # 构建产物(admin 等静态资源)
|
||||
├── tests/ # 测试资源与脚本
|
||||
│ ├── compat/ # 兼容性夹具与期望输出
|
||||
│ │ ├── expected/ # 预期结果样本
|
||||
@@ -64,9 +81,9 @@ ds2api/
|
||||
│ │ └── toolcalls/ # toolcall 夹具
|
||||
│ ├── node/ # Node 单元测试
|
||||
│ ├── raw_stream_samples/ # 上游原始 SSE 样本
|
||||
│ │ ├── content-filter-trigger-20260405-jwt3/ # 风控终态样本
|
||||
│ │ ├── continue-thinking-snapshot-replay-20260405/ # continue 样本
|
||||
│ │ ├── guangzhou-weather-reasoner-search-20260404/ # 搜索+引用样本
|
||||
│ │ ├── longtext-deepseek-v4-flash-20260429/ # flash 长文本/文件上传样本
|
||||
│ │ ├── longtext-deepseek-v4-pro-20260429/ # pro 长文本/文件上传样本
|
||||
│ │ ├── markdown-format-example-20260405/ # Markdown 样本
|
||||
│ │ └── markdown-format-example-20260405-spacefix/ # 空格修复样本
|
||||
│ ├── scripts/ # 测试脚本入口
|
||||
@@ -79,6 +96,8 @@ ds2api/
|
||||
├── features/ # 功能模块
|
||||
│ ├── account/ # 账号管理页面
|
||||
│ ├── apiTester/ # API 测试页面
|
||||
│ ├── chatHistory/ # 服务器端对话记录页面
|
||||
│ ├── proxy/ # 代理管理页面
|
||||
│ ├── settings/ # 设置页面
|
||||
│ └── vercel/ # Vercel 同步页面
|
||||
├── layout/ # 布局组件
|
||||
@@ -90,36 +109,105 @@ ds2api/
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
C[Client/SDK] --> R[internal/server/router.go]
|
||||
R --> OA[OpenAI Adapter]
|
||||
R --> CA[Claude Adapter]
|
||||
R --> GA[Gemini Adapter]
|
||||
R --> AD[Admin API]
|
||||
C[Client / SDK] --> R[internal/server/router.go]
|
||||
|
||||
CA --> BR[translatorcliproxy]
|
||||
GA --> BR
|
||||
BR --> CORE[internal/adapter/openai ChatCompletions]
|
||||
OA --> CORE
|
||||
subgraph HTTP[HTTP API surface]
|
||||
OA[internal/httpapi/openai]
|
||||
CHAT[openai/chat]
|
||||
RESP[openai/responses]
|
||||
FILES[openai/files + embeddings]
|
||||
CA[internal/httpapi/claude]
|
||||
GA[internal/httpapi/gemini]
|
||||
AD[internal/httpapi/admin/*]
|
||||
WEB[internal/webui static admin]
|
||||
end
|
||||
|
||||
CORE --> AUTH[internal/auth + config key/account resolver]
|
||||
CORE --> POOL[internal/account queue + concurrency]
|
||||
CORE --> TOOL[internal/toolcall parser + sieve]
|
||||
CORE --> DS[internal/deepseek client]
|
||||
subgraph COMPAT[Prompt compatibility core]
|
||||
PC[internal/promptcompat]
|
||||
PROMPT[internal/prompt]
|
||||
HIST[internal/httpapi/openai/history]
|
||||
end
|
||||
|
||||
subgraph RUNTIME[Shared runtime]
|
||||
AUTH[internal/auth]
|
||||
POOL[internal/account queue + concurrency]
|
||||
CR[internal/completionruntime]
|
||||
TURN[internal/assistantturn]
|
||||
STREAM[internal/stream + internal/sse]
|
||||
TOOL[internal/toolcall + internal/toolstream]
|
||||
FMT[internal/format/openai + claude]
|
||||
DS[internal/deepseek/client]
|
||||
POW[pow + internal/deepseek/protocol]
|
||||
end
|
||||
|
||||
subgraph NODE[Vercel Node Runtime]
|
||||
NCS[api/chat-stream.js]
|
||||
JS[internal/js/chat-stream + stream-tool-sieve]
|
||||
end
|
||||
|
||||
R --> OA --> CHAT
|
||||
OA --> RESP
|
||||
OA --> FILES
|
||||
R --> CA
|
||||
R --> GA
|
||||
R --> AD
|
||||
R --> WEB
|
||||
R -.Vercel stream.-> NCS
|
||||
|
||||
CA --> PC
|
||||
GA --> PC
|
||||
CHAT --> PC
|
||||
RESP --> PC
|
||||
PC --> PROMPT
|
||||
PC -.长历史.-> HIST
|
||||
PC --> AUTH
|
||||
PC --> CR
|
||||
|
||||
NCS -.Go prepare/release.-> CHAT
|
||||
NCS --> JS
|
||||
JS --> TOOL
|
||||
|
||||
AUTH --> POOL
|
||||
CHAT --> CR
|
||||
RESP --> CR
|
||||
CA --> CR
|
||||
GA --> CR
|
||||
CR --> DS
|
||||
CR --> STREAM
|
||||
CR --> TURN
|
||||
STREAM --> TURN
|
||||
STREAM --> TOOL
|
||||
TURN --> FMT
|
||||
POOL --> CR
|
||||
DS --> POW
|
||||
DS --> U[DeepSeek upstream]
|
||||
```
|
||||
|
||||
## 3. internal/ 子模块职责
|
||||
|
||||
- `internal/server`:路由树和中间件挂载(健康检查、协议入口、Admin/WebUI)。
|
||||
- `internal/adapter/openai`:统一执行内核(chat/responses/embeddings 与 tool calling 语义)。
|
||||
- `internal/adapter/{claude,gemini}`:协议输入输出适配,不重复实现上游调用逻辑。
|
||||
- `internal/translatorcliproxy`:Claude/Gemini 与 OpenAI 结构互转。
|
||||
- `internal/deepseek`:上游请求、会话、PoW、SSE 消费。
|
||||
- `internal/stream` + `internal/sse`:流式解析与增量处理。
|
||||
- `internal/toolcall`:以 XML/Markup 家族为核心的工具调用解析与防泄漏筛分(`<tool_call>` / `<function_call>` / `<invoke>` / `tool_use` / antml 变体)。
|
||||
- `internal/admin`:配置管理、账号管理、Vercel 同步、版本检查、开发抓包。
|
||||
- `internal/httpapi/openai/*`:OpenAI HTTP surface,按 chat、responses、files、embeddings、history、shared 拆分;chat/responses 共享 promptcompat、stream、toolcall 等核心语义。
|
||||
- `internal/httpapi/{claude,gemini}`:协议输入输出适配,归一到同一套 prompt compatibility 语义;正常直连路径必须通过 `completionruntime` 共享 DeepSeek session/PoW/completion 调用,`translatorcliproxy` 仅保留给 Vercel prepare/release、后端缺失 fallback 和回归测试。
|
||||
- `internal/httpapi/requestbody`:跨协议复用的请求体读取、JSON 解码前置校验与 UTF-8 错误处理辅助。
|
||||
- `internal/promptcompat`:OpenAI/Claude/Gemini 请求到 DeepSeek 网页纯文本上下文的兼容内核。
|
||||
- `internal/assistantturn`:Go 输出侧统一语义层,把 DeepSeek SSE 收集结果和流式收尾状态归一成 assistant turn,集中处理 thinking、tool call、citation、usage、stop/error 语义。
|
||||
- `internal/completionruntime`:Go surface 共享的 completion 执行辅助,负责 DeepSeek session/PoW/call 启动、非流式 collect 和 empty-output retry;流式路径复用它启动上游请求,继续用 `internal/stream` 做实时消费,并在最终收尾阶段接入 `assistantturn`。
|
||||
- `internal/translatorcliproxy`:Claude/Gemini 与 OpenAI 结构互转的桥接兼容层,不作为主业务协议转换中心。
|
||||
- `internal/deepseek/{client,protocol,transport}`:上游请求、会话、PoW 适配、协议常量与传输层。
|
||||
- `internal/js/chat-stream` + `api/chat-stream.js`:Vercel Node 流式桥;Go prepare/release 管理鉴权、账号租约和 completion payload,Node 侧负责实时 SSE 转发并保持 Go 对齐的终结态和 tool sieve 语义。
|
||||
- `internal/stream` + `internal/sse`:Go 流式解析与增量处理。
|
||||
- `internal/toolcall` + `internal/toolstream`:DSML 外壳兼容与 canonical XML 工具调用解析、防泄漏筛分;DSML 会在入口归一化回 XML,内部仍按 XML 语义解析。
|
||||
- `internal/httpapi/admin/*`:Admin API 根装配与 auth/accounts/config/settings/proxies/rawsamples/vercel/history/devcapture/version 等资源子包。
|
||||
- `internal/chathistory`:服务器端对话记录持久化、分页、单条详情和保留策略。
|
||||
- `internal/config`:配置加载、校验、运行时 settings 热更新。
|
||||
- `internal/account`:托管账号池、并发槽位、等待队列。
|
||||
- `internal/textclean`:文本清洗,移除 `[reference: N]` 标记等噪声。
|
||||
- `internal/claudeconv`:Claude API 请求到 DeepSeek 格式的协议转换。
|
||||
- `internal/compat`:兼容性回归测试套件,用 SSE 夹具验证输出一致性。
|
||||
- `internal/rawsample`:上游原始响应的采集、读写与管理。
|
||||
- `internal/devcapture`:开发调试抓包,存储 HTTP 请求/响应用于问题排查。
|
||||
- `internal/util`:跨包通用工具,含 JSON 写入、类型转换、token 计数、thinking 解析等。
|
||||
- `internal/version`:版本号查询与比较,支持构建注入和运行时解析。
|
||||
|
||||
## 4. WebUI 与运行时关系
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ go run ./cmd/ds2api
|
||||
cd webui
|
||||
|
||||
# 2. Install dependencies
|
||||
npm install
|
||||
npm ci
|
||||
|
||||
# 3. Start dev server (hot reload)
|
||||
npm run dev
|
||||
@@ -59,10 +59,12 @@ docker-compose -f docker-compose.dev.yml up
|
||||
|
||||
| Language | Standards |
|
||||
| --- | --- |
|
||||
| **Go** | Run `./scripts/lint.sh` (gofmt + golangci-lint) and ensure `go test ./...` passes before committing |
|
||||
| **Go** | Run `gofmt -w` after editing Go files; before committing, run `./scripts/lint.sh` (format check + golangci-lint) |
|
||||
| **JavaScript/React** | Follow existing project style (functional components) |
|
||||
| **Commit messages** | Use semantic prefixes: `feat:`, `fix:`, `docs:`, `refactor:`, `style:`, `perf:`, `chore:` |
|
||||
|
||||
Do not silently ignore cleanup errors from I/O-style calls such as `Close`, `Flush`, or `Sync`; return them when possible, otherwise log them explicitly.
|
||||
|
||||
## Submitting a PR
|
||||
|
||||
1. Fork the repo
|
||||
@@ -85,10 +87,13 @@ Manually build WebUI to `static/admin/`:
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Go + Node unit tests (recommended)
|
||||
# Local PR gates (kept aligned with the quality-gates workflow)
|
||||
./scripts/lint.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
npm run build --prefix webui
|
||||
|
||||
# End-to-end live tests (real accounts)
|
||||
# End-to-end live tests (real accounts; recommended for releases or high-risk changes)
|
||||
./tests/scripts/run-live.sh
|
||||
```
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ go run ./cmd/ds2api
|
||||
cd webui
|
||||
|
||||
# 2. 安装依赖
|
||||
npm install
|
||||
npm ci
|
||||
|
||||
# 3. 启动开发服务器(热更新)
|
||||
npm run dev
|
||||
@@ -59,10 +59,12 @@ docker-compose -f docker-compose.dev.yml up
|
||||
|
||||
| 语言 | 规范 |
|
||||
| --- | --- |
|
||||
| **Go** | 提交前运行 `./scripts/lint.sh`(包含 gofmt+golangci-lint)并确保 `go test ./...` 通过 |
|
||||
| **Go** | 修改 Go 文件后运行 `gofmt -w`;提交前运行 `./scripts/lint.sh`(包含格式化检查和 golangci-lint) |
|
||||
| **JavaScript/React** | 保持现有代码风格(函数组件) |
|
||||
| **提交信息** | 使用语义化前缀:`feat:`、`fix:`、`docs:`、`refactor:`、`style:`、`perf:`、`chore:` |
|
||||
|
||||
I/O 类清理调用(如 `Close`、`Flush`、`Sync`)的错误不要直接忽略;无法向上返回时请显式记录日志。
|
||||
|
||||
## 提交 PR
|
||||
|
||||
1. Fork 仓库
|
||||
@@ -85,10 +87,13 @@ docker-compose -f docker-compose.dev.yml up
|
||||
## 运行测试
|
||||
|
||||
```bash
|
||||
# Go + Node 单元测试(推荐)
|
||||
# PR 本地门禁(与 quality-gates 工作流保持一致)
|
||||
./scripts/lint.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
npm run build --prefix webui
|
||||
|
||||
# 端到端全链路测试(真实账号)
|
||||
# 端到端全链路测试(真实账号,发布或高风险改动时建议执行)
|
||||
./tests/scripts/run-live.sh
|
||||
```
|
||||
|
||||
|
||||
@@ -64,15 +64,15 @@ Use `config.json` as the single source of truth:
|
||||
|
||||
Built-in GitHub Actions workflow: `.github/workflows/release-artifacts.yml`
|
||||
|
||||
- **Trigger**: only on Release `published` (no build on normal push)
|
||||
- **Outputs**: multi-platform binary archives + `sha256sums.txt`
|
||||
- **Trigger**: by default only on Release `published`; you can also run it manually via `workflow_dispatch` and pass `release_tag` to rerun / backfill
|
||||
- **Outputs**: multi-platform binary archives, Linux Docker image export tarballs, and `sha256sums.txt`
|
||||
- **Container publishing**: GHCR only (`ghcr.io/cjackhwang/ds2api`)
|
||||
|
||||
| Platform | Architecture | Format |
|
||||
| --- | --- | --- |
|
||||
| Linux | amd64, arm64 | `.tar.gz` |
|
||||
| Linux | amd64, arm64, armv7 | `.tar.gz` |
|
||||
| macOS | amd64, arm64 | `.tar.gz` |
|
||||
| Windows | amd64 | `.zip` |
|
||||
| Windows | amd64, arm64 | `.zip` |
|
||||
|
||||
Each archive includes:
|
||||
|
||||
@@ -130,6 +130,9 @@ docker-compose logs -f
|
||||
```
|
||||
|
||||
The default `docker-compose.yml` directly uses `ghcr.io/cjackhwang/ds2api:latest` and maps host port `6011` to container port `5001`. If you want `5001` exposed directly, set `DS2API_HOST_PORT=5001` (or adjust the `ports` mapping).
|
||||
The compose template also defaults to `DS2API_CONFIG_PATH=/data/config.json` with `./config.json:/data/config.json` mounted, so deployments avoid read-only `/app` persistence issues by default.
|
||||
The image pre-creates `/data` and grants it to the non-root `ds2api` user. If you bind-mount a single host file, make sure `config.json` is readable/writable by the container user, for example with `chmod 644 config.json`; otherwise Linux UID/GID mismatches can still cause `open /data/config.json: permission denied`.
|
||||
Compatibility note: when `DS2API_CONFIG_PATH` is unset and runtime base dir is `/app`, newer versions prefer `/data/config.json`; if that file is missing but legacy `/app/config.json` exists, DS2API automatically falls back to the legacy path to avoid post-upgrade config loss.
|
||||
|
||||
If you want a pinned version instead of `latest`, you can also pull a specific tag directly:
|
||||
|
||||
@@ -194,10 +197,46 @@ This repo includes a `zeabur.yaml` template for one-click deployment on Zeabur:
|
||||
Notes:
|
||||
|
||||
- **Port**: DS2API listens on `5001` by default; the template sets `PORT=5001`.
|
||||
- **Persistent config**: the template mounts `/data` and sets `DS2API_CONFIG_PATH=/data/config.json`. After importing config in Admin UI, it will be written and persisted to this path.
|
||||
- **Persistent config**: the template mounts `/data` and sets `DS2API_CONFIG_PATH=/data/config.json`. On a fresh volume, DS2API starts with an empty file-backed config; after importing config in Admin UI, it will be written and persisted to this path.
|
||||
- **`open /app/config.json: permission denied`**: this means the instance is trying to persist runtime tokens to a read-only path (commonly `/app` inside the image).
|
||||
Recommended handling:
|
||||
1. Set a writable path explicitly: `DS2API_CONFIG_PATH=/data/config.json` (and mount a persistent volume at `/data`);
|
||||
2. If you bootstrap with `DS2API_CONFIG_JSON` and do not need runtime writeback, keep env-backed mode (`DS2API_ENV_WRITEBACK` disabled);
|
||||
3. In current versions, login/session tests continue even if persistence fails; the Admin API returns a warning that token persistence failed, and the token is memory-only until restart.
|
||||
- **Build version**: Zeabur / regular `docker build` does not require `BUILD_VERSION` by default. The image prefers that build arg when provided, and automatically falls back to the repo-root `VERSION` file when it is absent.
|
||||
- **First login**: after deployment, open `/admin` and login with `DS2API_ADMIN_KEY` shown in Zeabur env/template instructions (recommended: rotate to a strong secret after first login).
|
||||
|
||||
#### Manual Deployment Without The Template
|
||||
|
||||
If you do not want to use the `zeabur.yaml` one-click template, deploy directly from the repo root with Zeabur's GitHub integration:
|
||||
|
||||
1. Fork this repo, or push the code to your own GitHub repository.
|
||||
2. In Zeabur Dashboard, create a Project, add a Service, then choose a GitHub/Git repository source.
|
||||
3. Select the repository and branch. Keep Root Directory as `/`.
|
||||
4. Use the Dockerfile build path. Zeabur auto-detects the repo-root `Dockerfile`; do not set `ZBPACK_IGNORE_DOCKERFILE=true`. If the UI asks for a Dockerfile name, enter `Dockerfile`.
|
||||
5. Add a persistent volume in the Service settings and mount it at `/data`.
|
||||
6. Configure environment variables:
|
||||
|
||||
| Variable | Recommended value | Description |
|
||||
| --- | --- | --- |
|
||||
| `PORT` | `5001` | Service listen port; keep it aligned with the exposed Zeabur HTTP port. |
|
||||
| `DS2API_ADMIN_KEY` | Strong random string | Required admin login key. |
|
||||
| `DS2API_CONFIG_PATH` | `/data/config.json` | Recommended persistent config path. |
|
||||
| `LOG_LEVEL` | `INFO` | Optional log level. |
|
||||
| `DS2API_CONFIG_JSON` | Raw JSON or Base64 JSON | Optional config bootstrap from env. |
|
||||
| `DS2API_ENV_WRITEBACK` | `1` | Optional; enable only when using `DS2API_CONFIG_JSON` and you want the initial config written to `/data/config.json`. |
|
||||
|
||||
7. Expose HTTP port `5001`. The health check path can be `/healthz`.
|
||||
8. After deployment, open `/admin`, login with `DS2API_ADMIN_KEY`, then import or edit config in Admin UI. A fresh volume does not need `/data/config.json` up front; the service boots first and creates the file on the first save.
|
||||
|
||||
Troubleshooting:
|
||||
|
||||
- **Startup log says `open /data/config.json: no such file or directory`**: make sure you deployed a version that includes the fresh-volume bootstrap fix, then redeploy the latest code.
|
||||
- **`open /app/config.json: permission denied`**: the config path still points at the read-only image directory; mount `/data` and set `DS2API_CONFIG_PATH=/data/config.json`.
|
||||
- **Config disappears after restart**: check that the `/data` persistent volume is mounted on this service. If you use `DS2API_CONFIG_JSON` but want Admin UI saves persisted, enable `DS2API_ENV_WRITEBACK=1`.
|
||||
|
||||
References: Zeabur's official [GitHub/Git integration](https://zeabur.com/docs/en-US/deploy/github), [Dockerfile deployment](https://zeabur.com/docs/en-US/deploy/dockerfile), and [Volumes](https://zeabur.com/docs/data-management/volumes) docs.
|
||||
|
||||
---
|
||||
|
||||
## 3. Vercel Deployment
|
||||
@@ -259,12 +298,14 @@ VERCEL_TEAM_ID=team_xxxxxxxxxxxx # optional for personal accounts
|
||||
| `DS2API_ENV_WRITEBACK` | When `DS2API_CONFIG_JSON` is present, auto-write to `DS2API_CONFIG_PATH` and switch to file-backed mode after success (`1/true/yes/on`) | Disabled |
|
||||
| `DS2API_VERCEL_INTERNAL_SECRET` | Hybrid streaming internal auth | Falls back to `DS2API_ADMIN_KEY` |
|
||||
| `DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS` | Stream lease TTL | `900` |
|
||||
| `DS2API_RAW_STREAM_SAMPLE_ROOT` | Raw stream sample root for saving/reading samples | `tests/raw_stream_samples` |
|
||||
| `VERCEL_TOKEN` | Vercel sync token | — |
|
||||
| `VERCEL_PROJECT_ID` | Vercel project ID | — |
|
||||
| `VERCEL_TEAM_ID` | Vercel team ID | — |
|
||||
| `DS2API_CHAT_HISTORY_PATH` | Chat history storage path (must be set to `/tmp/chat_history.json` on Vercel, otherwise unavailable due to read-only filesystem) | `data/chat_history.json` |
|
||||
| `DS2API_VERCEL_PROTECTION_BYPASS` | Deployment protection bypass for internal Node→Go calls | — |
|
||||
|
||||
### 3.3 Vercel Architecture
|
||||
### 3.4 Vercel Architecture
|
||||
|
||||
```text
|
||||
Request ──────┐
|
||||
@@ -300,13 +341,14 @@ Vercel Go Runtime applies platform-level response buffering, so this project use
|
||||
|
||||
- `api/chat-stream.js` falls back to Go entry (`?__go=1`) for non-stream requests only
|
||||
- Streaming requests (including requests with `tools`) stay on the Node path and use Go-aligned tool-call anti-leak handling
|
||||
- The Node stream path also mirrors Go finalization semantics: empty visible output returns the same shaped error SSE, and empty `content_filter` returns a `content_filter` error
|
||||
- WebUI non-stream test calls `?__go=1` directly to avoid Node hop timeout on long requests
|
||||
|
||||
#### Function Duration
|
||||
|
||||
`vercel.json` sets `maxDuration: 300` for both `api/chat-stream.js` and `api/index.go` (subject to your Vercel plan limits).
|
||||
|
||||
### 3.4 Vercel Troubleshooting
|
||||
### 3.5 Vercel Troubleshooting
|
||||
|
||||
#### Go Build Failure
|
||||
|
||||
@@ -350,7 +392,23 @@ If API responses return Vercel HTML `Authentication Required`:
|
||||
- **Option B**: Add `x-vercel-protection-bypass` header to requests
|
||||
- **Option C**: Set `VERCEL_AUTOMATION_BYPASS_SECRET` (or `DS2API_VERCEL_PROTECTION_BYPASS`) for internal Node→Go calls
|
||||
|
||||
### 3.5 Build Artifacts Not Committed
|
||||
#### Chat History Unavailable (read-only file system)
|
||||
|
||||
```text
|
||||
create chat history dir: mkdir /var/task/data: read-only file system
|
||||
```
|
||||
|
||||
**Cause**: Vercel Serverless functions have a read-only filesystem (`/var/task`). Chat history fails because it cannot create directories there.
|
||||
|
||||
**Fix**: Add the following in Vercel Project Settings → Environment Variables:
|
||||
|
||||
```text
|
||||
DS2API_CHAT_HISTORY_PATH=/tmp/chat_history.json
|
||||
```
|
||||
|
||||
`/tmp` is the only writable directory in Vercel Serverless. Data is ephemeral (not persisted across cold starts), but the feature works within a single instance lifetime.
|
||||
|
||||
### 3.6 Build Artifacts Not Committed
|
||||
|
||||
- `static/admin` directory is not in Git
|
||||
- Vercel / Docker automatically generate WebUI assets during build
|
||||
@@ -392,7 +450,7 @@ Or step by step:
|
||||
|
||||
```bash
|
||||
cd webui
|
||||
npm install
|
||||
npm ci
|
||||
npm run build
|
||||
# Output goes to static/admin/
|
||||
```
|
||||
@@ -536,7 +594,7 @@ curl -s http://127.0.0.1:5001/readyz
|
||||
|
||||
# 3. Model list
|
||||
curl -s http://127.0.0.1:5001/v1/models
|
||||
# Expected: {"object":"list","data":[...]}
|
||||
# Expected: {"object":"list","data":[...]} (including `*-nothinking` variants)
|
||||
|
||||
# 4. Admin panel (if WebUI is built)
|
||||
curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:5001/admin
|
||||
@@ -546,7 +604,7 @@ curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:5001/admin
|
||||
curl http://127.0.0.1:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"model":"deepseek-chat","messages":[{"role":"user","content":"hello"}]}'
|
||||
-d '{"model":"deepseek-v4-flash","messages":[{"role":"user","content":"hello"}]}'
|
||||
```
|
||||
|
||||
---
|
||||
@@ -577,4 +635,4 @@ The testsuite automatically performs:
|
||||
- ✅ Live scenario verification (OpenAI/Claude/Admin/concurrency/toolcall/streaming)
|
||||
- ✅ Full request/response artifact logging for debugging
|
||||
|
||||
For detailed testsuite documentation, see [TESTING.md](TESTING.md).
|
||||
For detailed testsuite documentation, see [TESTING.md](TESTING.md). The fixed local PR gates are listed in [TESTING.md](TESTING.md#pr-门禁--pr-gates).
|
||||
|
||||
@@ -64,15 +64,15 @@ cp config.example.json config.json
|
||||
|
||||
仓库内置 GitHub Actions 工作流:`.github/workflows/release-artifacts.yml`
|
||||
|
||||
- **触发条件**:仅在 Release `published` 时触发(普通 push 不会构建)
|
||||
- **构建产物**:多平台二进制压缩包 + `sha256sums.txt`
|
||||
- **触发条件**:默认仅在 Release `published` 时自动触发;也支持在 Actions 页面手动 `workflow_dispatch`,并填写 `release_tag` 复跑/补发
|
||||
- **构建产物**:多平台二进制压缩包、Linux Docker 镜像导出包 + `sha256sums.txt`
|
||||
- **容器镜像发布**:仅发布到 GHCR(`ghcr.io/cjackhwang/ds2api`)
|
||||
|
||||
| 平台 | 架构 | 文件格式 |
|
||||
| --- | --- | --- |
|
||||
| Linux | amd64, arm64 | `.tar.gz` |
|
||||
| Linux | amd64, arm64, armv7 | `.tar.gz` |
|
||||
| macOS | amd64, arm64 | `.tar.gz` |
|
||||
| Windows | amd64 | `.zip` |
|
||||
| Windows | amd64, arm64 | `.zip` |
|
||||
|
||||
每个压缩包包含:
|
||||
|
||||
@@ -130,6 +130,9 @@ docker-compose logs -f
|
||||
```
|
||||
|
||||
默认 `docker-compose.yml` 直接使用 `ghcr.io/cjackhwang/ds2api:latest`,并把宿主机 `6011` 映射到容器内的 `5001`。如果你希望直接对外暴露 `5001`,请设置 `DS2API_HOST_PORT=5001`(或者手动调整 `ports` 配置)。
|
||||
Compose 模板还会默认设置 `DS2API_CONFIG_PATH=/data/config.json` 并挂载 `./config.json:/data/config.json`,优先避免 `/app` 只读带来的配置持久化问题。
|
||||
镜像内会预创建 `/data` 并授权给非 root 的 `ds2api` 用户;如果你使用 bind mount 单文件,请确保宿主机 `config.json` 至少可被容器用户读取/写入,例如 `chmod 644 config.json`,否则 Linux UID/GID 不一致时仍可能出现 `open /data/config.json: permission denied`。
|
||||
兼容说明:若未设置 `DS2API_CONFIG_PATH` 且运行目录是 `/app`,新版本会优先使用 `/data/config.json`;当该文件不存在但检测到历史 `/app/config.json` 时,会自动回退读取旧路径,避免升级后“配置丢失”。
|
||||
|
||||
如需固定版本,也可以直接拉取指定 tag:
|
||||
|
||||
@@ -194,10 +197,46 @@ healthcheck:
|
||||
部署要点:
|
||||
|
||||
- **端口**:服务默认监听 `5001`,模板会固定设置 `PORT=5001`。
|
||||
- **配置持久化**:模板挂载卷 `/data`,并设置 `DS2API_CONFIG_PATH=/data/config.json`;在管理台导入配置后,会写入并持久化到该路径。
|
||||
- **配置持久化**:模板挂载卷 `/data`,并设置 `DS2API_CONFIG_PATH=/data/config.json`;首次空卷启动时会先使用空的文件模式配置,在管理台导入配置后,会写入并持久化到该路径。
|
||||
- **`open /app/config.json: permission denied`**:说明当前实例在尝试把运行时 token 持久化到只读路径(常见于镜像内 `/app`)。
|
||||
处理建议:
|
||||
1. 显式设置可写路径:`DS2API_CONFIG_PATH=/data/config.json`(并挂载持久卷到 `/data`);
|
||||
2. 若你使用 `DS2API_CONFIG_JSON` 启动且不需要运行时落盘,可保持环境变量模式(`DS2API_ENV_WRITEBACK` 关闭);
|
||||
3. 最新版本中,即使持久化失败,登录/会话测试仍会继续,仅提示“token 未持久化(重启后丢失)”。
|
||||
- **构建版本号**:Zeabur / 普通 `docker build` 默认不需要传 `BUILD_VERSION`;镜像会优先使用该构建参数,未提供时自动回退到仓库根目录的 `VERSION` 文件。
|
||||
- **首次登录**:部署完成后访问 `/admin`,使用 Zeabur 环境变量/模板指引中的 `DS2API_ADMIN_KEY` 登录(建议首次登录后自行更换为强密码)。
|
||||
|
||||
#### 不使用模板手动部署
|
||||
|
||||
如果你不想使用 `zeabur.yaml` 一键模板,可以直接用 Zeabur 的 GitHub 集成从仓库根目录构建:
|
||||
|
||||
1. Fork 本仓库,或把代码推送到你自己的 GitHub 仓库。
|
||||
2. 在 Zeabur Dashboard 中创建 Project,然后添加 Service,选择 GitHub/Git 仓库来源。
|
||||
3. 选择仓库与分支,Root Directory 保持 `/`。
|
||||
4. 构建方式使用 Dockerfile。Zeabur 会自动检测仓库根目录的 `Dockerfile`;不要设置 `ZBPACK_IGNORE_DOCKERFILE=true`。如果界面要求填写 Dockerfile 名称,填写 `Dockerfile`。
|
||||
5. 在 Service 配置中添加持久卷,挂载目录填写 `/data`。
|
||||
6. 配置环境变量:
|
||||
|
||||
| 变量 | 推荐值 | 说明 |
|
||||
| --- | --- | --- |
|
||||
| `PORT` | `5001` | 服务监听端口,需要和 Zeabur 暴露的 HTTP 端口一致。 |
|
||||
| `DS2API_ADMIN_KEY` | 强随机字符串 | 管理台登录密钥,必填。 |
|
||||
| `DS2API_CONFIG_PATH` | `/data/config.json` | 配置持久化路径,建议必填。 |
|
||||
| `LOG_LEVEL` | `INFO` | 可选,日志级别。 |
|
||||
| `DS2API_CONFIG_JSON` | 原始 JSON 或 Base64 JSON | 可选,用于用环境变量初始化配置。 |
|
||||
| `DS2API_ENV_WRITEBACK` | `1` | 可选;当设置了 `DS2API_CONFIG_JSON` 且希望首次启动后写入 `/data/config.json` 时再启用。 |
|
||||
|
||||
7. 暴露 HTTP 端口 `5001`,健康检查路径可填 `/healthz`。
|
||||
8. 部署完成后访问 `/admin`,用 `DS2API_ADMIN_KEY` 登录,然后在管理台导入或编辑配置。首次空卷可以没有 `/data/config.json`,服务会先启动,第一次保存时自动创建该文件。
|
||||
|
||||
常见问题:
|
||||
|
||||
- **启动日志出现 `open /data/config.json: no such file or directory`**:请确认已经部署包含“首次空卷启动”修复的版本,并重新部署最新代码。
|
||||
- **出现 `open /app/config.json: permission denied`**:说明配置路径仍指向镜像内只读目录;设置持久卷 `/data`,并确认 `DS2API_CONFIG_PATH=/data/config.json`。
|
||||
- **管理台保存后重启配置丢失**:检查 `/data` 持久卷是否已挂载到当前服务;如果使用了 `DS2API_CONFIG_JSON`,但想让管理台保存落盘,请启用 `DS2API_ENV_WRITEBACK=1`。
|
||||
|
||||
参考:Zeabur 官方文档的 [GitHub/Git 集成](https://zeabur.com/docs/zh-CN/deploy/github)、[Dockerfile 部署](https://zeabur.com/docs/zh-CN/deploy/dockerfile) 与 [Volumes](https://zeabur.com/docs/data-management/volumes)。
|
||||
|
||||
---
|
||||
|
||||
## 三、Vercel 部署
|
||||
@@ -258,10 +297,12 @@ VERCEL_TEAM_ID=team_xxxxxxxxxxxx # 个人账号可留空
|
||||
| `DS2API_GLOBAL_MAX_INFLIGHT` | 全局并发上限 | `recommended_concurrency` |
|
||||
| `DS2API_ENV_WRITEBACK` | 检测到 `DS2API_CONFIG_JSON` 时自动写入 `DS2API_CONFIG_PATH`,并在成功后转为文件模式(`1/true/yes/on`) | 关闭 |
|
||||
| `DS2API_VERCEL_INTERNAL_SECRET` | 混合流式内部鉴权 | 回退用 `DS2API_ADMIN_KEY` |
|
||||
| `DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS` | 流式 lease TTL | 默认与 `responses.store_ttl_seconds` 同步,若未设置则为 `900` |
|
||||
| `DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS` | 流式 lease TTL | `900` |
|
||||
| `DS2API_RAW_STREAM_SAMPLE_ROOT` | raw stream 样本保存/读取根目录 | `tests/raw_stream_samples` |
|
||||
| `VERCEL_TOKEN` | Vercel 同步 token | — |
|
||||
| `VERCEL_PROJECT_ID` | Vercel 项目 ID | — |
|
||||
| `VERCEL_TEAM_ID` | Vercel 团队 ID | — |
|
||||
| `DS2API_CHAT_HISTORY_PATH` | Chat history 存储路径(Vercel 上必须设为 `/tmp/chat_history.json`,否则因文件系统只读而不可用) | `data/chat_history.json` |
|
||||
| `DS2API_VERCEL_PROTECTION_BYPASS` | 部署保护绕过密钥(内部 Node→Go 调用) | — |
|
||||
|
||||
### 3.3 运行时行为配置(通过 Admin API 设置)
|
||||
@@ -274,7 +315,7 @@ VERCEL_TEAM_ID=team_xxxxxxxxxxxx # 个人账号可留空
|
||||
|
||||
详细说明参见 [API.md](../API.md#admin-接口) 中 `/admin/settings` 部分。
|
||||
|
||||
### 3.3 Vercel 架构说明
|
||||
### 3.4 Vercel 架构说明
|
||||
|
||||
```text
|
||||
请求 ─────┐
|
||||
@@ -310,13 +351,14 @@ api/index.go api/chat-stream.js
|
||||
|
||||
- `api/chat-stream.js` 仅对非流式请求回退到 Go 入口(`?__go=1`)
|
||||
- 流式请求(包括带 `tools`)走 Node 路径,并执行与 Go 对齐的 tool-call 防泄漏处理
|
||||
- Node 流式路径同时对齐 Go 的终结态语义:空可见输出会返回同形状错误 SSE,空 `content_filter` 会返回 `content_filter` 错误
|
||||
- WebUI 的"非流式测试"直接请求 `?__go=1`,避免 Node 中转造成长请求超时
|
||||
|
||||
#### 函数时长
|
||||
|
||||
`vercel.json` 已将 `api/chat-stream.js` 与 `api/index.go` 的 `maxDuration` 设为 `300`(受 Vercel 套餐上限约束)。
|
||||
|
||||
### 3.4 Vercel 常见报错排查
|
||||
### 3.5 Vercel 常见报错排查
|
||||
|
||||
#### Go 构建失败
|
||||
|
||||
@@ -360,7 +402,23 @@ No Output Directory named "public" found after the Build completed.
|
||||
- **方案 B**:请求中添加 `x-vercel-protection-bypass` 头
|
||||
- **方案 C**:设置 `VERCEL_AUTOMATION_BYPASS_SECRET`(或 `DS2API_VERCEL_PROTECTION_BYPASS`),仅影响内部 Node→Go 调用
|
||||
|
||||
### 3.5 仓库不提交构建产物
|
||||
#### Chat History 不可用(read-only file system)
|
||||
|
||||
```text
|
||||
create chat history dir: mkdir /var/task/data: read-only file system
|
||||
```
|
||||
|
||||
**原因**:Vercel Serverless 函数的文件系统(`/var/task`)为只读,chat history 尝试在该路径下创建目录失败。
|
||||
|
||||
**解决**:在 Vercel Project Settings → Environment Variables 中添加:
|
||||
|
||||
```text
|
||||
DS2API_CHAT_HISTORY_PATH=/tmp/chat_history.json
|
||||
```
|
||||
|
||||
`/tmp` 是 Vercel Serverless 环境中唯一可写的目录。数据在函数冷启动之间不会持久化(ephemeral),但在单个实例生命周期内功能正常。
|
||||
|
||||
### 3.6 仓库不提交构建产物
|
||||
|
||||
- `static/admin` 目录不在 Git 中
|
||||
- Vercel / Docker 构建阶段自动生成 WebUI 静态文件
|
||||
@@ -402,7 +460,7 @@ go run ./cmd/ds2api
|
||||
|
||||
```bash
|
||||
cd webui
|
||||
npm install
|
||||
npm ci
|
||||
npm run build
|
||||
# 产物输出到 static/admin/
|
||||
```
|
||||
@@ -546,7 +604,7 @@ curl -s http://127.0.0.1:5001/readyz
|
||||
|
||||
# 3. 模型列表
|
||||
curl -s http://127.0.0.1:5001/v1/models
|
||||
# 预期: {"object":"list","data":[...]}
|
||||
# 预期: {"object":"list","data":[...]}(包含 `*-nothinking` 变体)
|
||||
|
||||
# 4. 管理台页面(如果已构建 WebUI)
|
||||
curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:5001/admin
|
||||
@@ -556,7 +614,7 @@ curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:5001/admin
|
||||
curl http://127.0.0.1:5001/v1/chat/completions \
|
||||
-H "Authorization: Bearer your-api-key" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"model":"deepseek-chat","messages":[{"role":"user","content":"hello"}]}'
|
||||
-d '{"model":"deepseek-v4-flash","messages":[{"role":"user","content":"hello"}]}'
|
||||
```
|
||||
|
||||
---
|
||||
@@ -587,4 +645,4 @@ go run ./cmd/ds2api-tests \
|
||||
- ✅ 真实调用场景验证(OpenAI/Claude/Admin/并发/toolcall/流式)
|
||||
- ✅ 全量请求与响应日志落盘(用于故障复盘)
|
||||
|
||||
详细测试集说明参阅 [TESTING.md](TESTING.md)。
|
||||
详细测试集说明参阅 [TESTING.md](TESTING.md)。PR 前的固定本地门禁以 [TESTING.md](TESTING.md#pr-门禁--pr-gates) 为准。
|
||||
|
||||
112
docs/DEVELOPMENT.md
Normal file
112
docs/DEVELOPMENT.md
Normal file
@@ -0,0 +1,112 @@
|
||||
# DS2API 开发者速查
|
||||
|
||||
语言 / Language: 中文
|
||||
|
||||
本文面向维护者和贡献者,用于快速判断“从哪里看、改哪里、跑什么”。架构细节仍以 [ARCHITECTURE.md](./ARCHITECTURE.md) 为准,接口行为以 [API.md](../API.md) 为准。
|
||||
|
||||
## 1. 本地入口
|
||||
|
||||
常用启动与检查:
|
||||
|
||||
```bash
|
||||
# 后端
|
||||
go run ./cmd/ds2api
|
||||
|
||||
# WebUI 开发服务器
|
||||
npm run dev --prefix webui
|
||||
|
||||
# WebUI 生产构建
|
||||
npm run build --prefix webui
|
||||
```
|
||||
|
||||
PR 前固定门禁:
|
||||
|
||||
```bash
|
||||
./scripts/lint.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
npm run build --prefix webui
|
||||
```
|
||||
|
||||
修改 Go 文件后先运行:
|
||||
|
||||
```bash
|
||||
gofmt -w <changed-go-files>
|
||||
```
|
||||
|
||||
## 2. 代码定位
|
||||
|
||||
优先从这些入口顺着调用链看:
|
||||
|
||||
| 目标 | 入口 |
|
||||
| --- | --- |
|
||||
| 总路由、CORS、健康检查 | `internal/server/router.go` |
|
||||
| OpenAI Chat / Responses | `internal/httpapi/openai/chat`、`internal/httpapi/openai/responses` |
|
||||
| Claude / Gemini 兼容入口 | `internal/httpapi/claude`、`internal/httpapi/gemini` |
|
||||
| API 请求归一到网页纯文本上下文 | `internal/promptcompat`、`docs/prompt-compatibility.md` |
|
||||
| 工具调用解析与流式防泄漏 | `internal/toolcall`、`internal/toolstream`、`docs/toolcall-semantics.md` |
|
||||
| DeepSeek 上游调用、登录、PoW、代理 | `internal/deepseek/client`、`internal/deepseek/transport` |
|
||||
| 账号池、并发槽位、等待队列 | `internal/account` |
|
||||
| Admin API | `internal/httpapi/admin` |
|
||||
| WebUI 页面 | `webui/src/layout/DashboardShell.jsx`、`webui/src/features/*` |
|
||||
| 服务器端对话记录 | `internal/chathistory`、`internal/httpapi/admin/history` |
|
||||
|
||||
## 3. 常见改动建议
|
||||
|
||||
- 改接口行为时,同时检查 `API.md` / `API.en.md` 是否需要同步。
|
||||
- 改 prompt 兼容链路时,必须同步 `docs/prompt-compatibility.md`。
|
||||
- 改 tool call 语义时,同时检查 Go、Node sieve 和 `docs/toolcall-semantics.md`。
|
||||
- 改 WebUI 配置项时,同时检查 `webui/src/features/settings`、语言包和 `config.example.json`。
|
||||
- 拆分大文件时,保持对外函数签名稳定,并跑 `./tests/scripts/check-refactor-line-gate.sh`。
|
||||
|
||||
## 4. 故障定位
|
||||
|
||||
接口请求先看路由入口,再看协议适配层,最后看共享 runtime:
|
||||
|
||||
1. 路由是否命中:`internal/server/router.go` 和对应 `RegisterRoutes`。
|
||||
2. 鉴权与账号选择:`internal/auth`、`internal/account`。
|
||||
3. 请求归一化:`internal/promptcompat` 或协议转换包。
|
||||
4. 上游请求:`internal/deepseek/client`。
|
||||
5. 流式输出:`internal/stream`、`internal/sse`、`internal/toolstream`。
|
||||
6. 响应格式:主路径看 `internal/assistantturn` 与 `internal/format/*`;`internal/translatorcliproxy` 只用于 Vercel/fallback/test 桥接。
|
||||
|
||||
对话记录页面问题优先检查:
|
||||
|
||||
- Admin API:`/admin/chat-history`、`/admin/chat-history/{id}`。
|
||||
- 后端存储:`internal/chathistory/store.go`。
|
||||
- 前端轮询和 ETag:`webui/src/features/chatHistory/ChatHistoryContainer.jsx`。
|
||||
|
||||
Tool call 问题优先跑:
|
||||
|
||||
```bash
|
||||
go test -v ./internal/toolcall ./internal/toolstream -count=1
|
||||
node --test tests/node/stream-tool-sieve.test.js tests/node/chat-stream.test.js
|
||||
```
|
||||
|
||||
## 5. 测试选择
|
||||
|
||||
小范围 Go 改动:
|
||||
|
||||
```bash
|
||||
go test ./internal/<package> -count=1
|
||||
```
|
||||
|
||||
前端改动:
|
||||
|
||||
```bash
|
||||
npm run build --prefix webui
|
||||
```
|
||||
|
||||
高风险协议或流式改动:
|
||||
|
||||
```bash
|
||||
./tests/scripts/run-unit-all.sh
|
||||
```
|
||||
|
||||
发布或真实账号链路验证:
|
||||
|
||||
```bash
|
||||
./tests/scripts/run-live.sh
|
||||
```
|
||||
|
||||
端到端测试产物默认写入 `artifacts/testsuite/`。分享日志前需要清理 token、密码、cookie 和原始请求响应内容。
|
||||
@@ -1,7 +1,7 @@
|
||||
# DeepSeek SSE 行为结构说明(第三方逆向版)
|
||||
|
||||
> 说明:本文基于当前仓库 `tests/raw_stream_samples/` 下全部 `upstream.stream.sse` 原始流样本整理而成,属于第三方逆向观察文档,不是官方协议。
|
||||
> 当前 corpus 由 4 份原始流组成,覆盖搜索+引用、风控终态、Markdown 输出和空格敏感输出等行为。
|
||||
> 当前 corpus 由 5 份原始流组成,覆盖长文本生成、文件上传上下文、continue 接续、Markdown 输出和空格敏感输出等行为。
|
||||
> 补充:文末还会注明少量“当前实现已确认、但 corpus 尚未完整覆盖”的行为,例如长思考场景下的自动续写状态。
|
||||
|
||||
文档导航:[文档总索引](./README.md) / [测试指南](./TESTING.md) / [样本目录说明](../tests/raw_stream_samples/README.md)
|
||||
@@ -12,8 +12,9 @@
|
||||
|
||||
| 样本 | 观察重点 |
|
||||
| --- | --- |
|
||||
| [guangzhou-weather-reasoner-search-20260404](../tests/raw_stream_samples/guangzhou-weather-reasoner-search-20260404/upstream.stream.sse) | 搜索+思考流程,包含 `reference:N` 引用标记与工具片段 |
|
||||
| [content-filter-trigger-20260405-jwt3](../tests/raw_stream_samples/content-filter-trigger-20260405-jwt3/upstream.stream.sse) | `CONTENT_FILTER` 终态分支,包含拒答模板与 `ban_regenerate` |
|
||||
| [longtext-deepseek-v4-flash-20260429](../tests/raw_stream_samples/longtext-deepseek-v4-flash-20260429/upstream.stream.sse) | DeepSeek V4 flash 长文本流,包含 current input file 上传后的 completion 样本 |
|
||||
| [longtext-deepseek-v4-pro-20260429](../tests/raw_stream_samples/longtext-deepseek-v4-pro-20260429/upstream.stream.sse) | DeepSeek V4 pro 长文本流,包含文件上传上下文和较长 reasoning/content 输出 |
|
||||
| [continue-thinking-snapshot-replay-20260405](../tests/raw_stream_samples/continue-thinking-snapshot-replay-20260405/upstream.stream.sse) | 多轮 `completion + continue` 原始流,用于验证接续思考去重 |
|
||||
| [markdown-format-example-20260405](../tests/raw_stream_samples/markdown-format-example-20260405/upstream.stream.sse) | Markdown 输出的早期样本,用于观察 token 级输出形态 |
|
||||
| [markdown-format-example-20260405-spacefix](../tests/raw_stream_samples/markdown-format-example-20260405-spacefix/upstream.stream.sse) | Markdown 输出修正样本,用于验证空格 chunk 必须保留 |
|
||||
|
||||
@@ -194,7 +195,7 @@ close
|
||||
|
||||
## 8. 终态行为
|
||||
|
||||
当前 corpus 里有两条很重要的终态分支。
|
||||
当前 corpus 直接覆盖正常完成和 continue 接续;当前实现还兼容 `CONTENT_FILTER` 风控终态,相关分支由协议观察与兼容性 fixture 继续守护。
|
||||
|
||||
### 8.1 正常完成
|
||||
|
||||
@@ -208,7 +209,7 @@ close
|
||||
|
||||
### 8.2 风控终态
|
||||
|
||||
`content-filter-trigger-20260405-jwt3` 展示了另一种终态路径:
|
||||
`CONTENT_FILTER` 不在当前 raw stream corpus 的目录样本中,但代码和兼容性测试仍按下面这种终态路径处理:
|
||||
|
||||
1. 先继续输出一段正常正文。
|
||||
2. 出现提示类 fragment,例如 `TIP`。
|
||||
@@ -309,7 +310,18 @@ parse SSE block
|
||||
- 新模型可能增加新的 `p` 路径。
|
||||
- 新版本可能增加新的 fragment.type。
|
||||
- `CONTENT_FILTER` 的终态模板内容可能变化。
|
||||
- 自动续写相关状态(如 `INCOMPLETE` / `AUTO_CONTINUE`)当前主要来自实测与实现兼容逻辑,后续字段形态仍可能变化。
|
||||
- 自动续写相关状态(如 `INCOMPLETE` / `AUTO_CONTINUE`)当前主要来自实测与实现兼容逻辑,后续字段形态仍可能变化。当前实现不会仅因早期 `WIP` 状态就自动继续;只有显式 `INCOMPLETE` 或 `auto_continue` 信号才会触发 continue。
|
||||
- 解析器应当对未知字段、未知路径、未知事件保持容忍。
|
||||
|
||||
如果你要把这份说明用于实际开发,建议同时保留原始流样本、回放脚本和回归测试,不要只依赖本文。
|
||||
|
||||
## 2026-04-29 最近线上样本增量观察
|
||||
|
||||
基于 `longtext-deepseek-v4-flash-20260429` 与 `longtext-deepseek-v4-pro-20260429` 两个真实账号长文本样本,近期格式变化要点如下:
|
||||
|
||||
1. `data:` 事件中仍大量出现 `{"v":"..."}` 的无路径增量(`p` 缺失),解析器必须把空路径视为可见正文候选,而不能只依赖 `response/content`。
|
||||
2. 对象形态 `v`(如 `{"text":"..."}` / `{"content":"..."}`)仍会出现,且可能与无路径 chunk 混用;仅按字符串处理会导致正文丢块。
|
||||
3. 多轮 continuation 场景下,后续 chunk 可能不再重复显式 `status`,状态机需要保留上一轮 `INCOMPLETE` 语义直到出现终态。
|
||||
4. 2026-04-29 起客户端头部版本基线上调到 `x-client-version: 2.0.3`,否则部分账号会出现上游行为不一致(包括空输出与补轮异常)。
|
||||
|
||||
建议:新增样本默认回放应优先覆盖「长文本 + 多轮 + 无路径 chunk」组合,避免只用短样本导致回归漏检。
|
||||
|
||||
@@ -11,18 +11,23 @@
|
||||
3. [接口文档(API)](../API.md)
|
||||
4. [部署指南](./DEPLOY.md)
|
||||
5. [测试指南](./TESTING.md)
|
||||
6. [贡献指南](./CONTRIBUTING.md)
|
||||
6. [开发者速查](./DEVELOPMENT.md)
|
||||
7. [贡献指南](./CONTRIBUTING.md)
|
||||
|
||||
### 专题文档
|
||||
|
||||
- [DS2API 项目价值说明](./project-value.md)
|
||||
- [API -> 网页对话纯文本兼容主链路说明](./prompt-compatibility.md)
|
||||
- [Tool Calling 统一语义](./toolcall-semantics.md)
|
||||
- [DeepSeek SSE 行为结构说明(逆向观察)](./DeepSeekSSE行为结构说明-2026-04-05.md)
|
||||
|
||||
### 文档维护约定
|
||||
|
||||
- 文档更新必须以实际代码实现为依据:总路由装配看 `internal/server/router.go`,协议/resource 路由看 `internal/httpapi/**/handler*.go` 与 `internal/httpapi/admin/handler.go`,配置默认值看 `internal/config/*`,模型/alias 看 `internal/config/models.go`,prompt 兼容链路看 `docs/prompt-compatibility.md` 列出的代码入口。
|
||||
- `README.MD` / `README.en.md`:面向首次接触用户,保留“是什么 + 怎么快速跑起来”。
|
||||
- `docs/ARCHITECTURE*.md`:面向开发者,集中维护项目结构、模块职责与调用链。
|
||||
- `API*.md`:面向客户端接入者,聚焦接口行为、鉴权和示例。
|
||||
- `docs/prompt-compatibility.md`:面向维护者,集中维护“API -> 网页对话纯文本上下文”的统一兼容语义;相关行为修改时必须同步更新。
|
||||
- 其他 `docs/*.md`:主题化说明,避免在多个文档重复粘贴同一段内容。
|
||||
|
||||
---
|
||||
@@ -38,16 +43,21 @@ Recommended reading order:
|
||||
3. [API reference](../API.en.md)
|
||||
4. [Deployment guide](./DEPLOY.en.md)
|
||||
5. [Testing guide](./TESTING.md)
|
||||
6. [Contributing guide](./CONTRIBUTING.en.md)
|
||||
6. [Developer quick reference](./DEVELOPMENT.md)
|
||||
7. [Contributing guide](./CONTRIBUTING.en.md)
|
||||
|
||||
### Topical docs
|
||||
|
||||
- [DS2API project value note](./project-value.md)
|
||||
- [API -> pure-text web-chat compatibility pipeline](./prompt-compatibility.md)
|
||||
- [Tool-calling unified semantics](./toolcall-semantics.md)
|
||||
- [DeepSeek SSE behavior notes (reverse-engineered)](./DeepSeekSSE行为结构说明-2026-04-05.md)
|
||||
|
||||
### Maintenance conventions
|
||||
|
||||
- Documentation updates must be grounded in the actual implementation: root routing lives in `internal/server/router.go`, protocol/resource routes live in `internal/httpapi/**/handler*.go` and `internal/httpapi/admin/handler.go`, config defaults in `internal/config/*`, models/aliases in `internal/config/models.go`, and the prompt compatibility pipeline in the code entrypoints listed by `docs/prompt-compatibility.md`.
|
||||
- `README.MD` / `README.en.md`: onboarding-oriented (“what + quick start”).
|
||||
- `docs/ARCHITECTURE*.md`: developer-oriented source of truth for module boundaries and execution flow.
|
||||
- `API*.md`: integration-oriented behavior/contracts.
|
||||
- `docs/prompt-compatibility.md`: maintainer-oriented source of truth for the “API -> pure-text web-chat context” compatibility flow; update it whenever related behavior changes.
|
||||
- Other `docs/*.md`: focused topics, avoid copy-pasting the same section into multiple files.
|
||||
|
||||
@@ -13,6 +13,7 @@ DS2API 提供两个层级的测试:
|
||||
| 单元测试(Go) | `./tests/scripts/run-unit-go.sh` | 不需要真实账号 |
|
||||
| 单元测试(Node) | `./tests/scripts/run-unit-node.sh` | 不需要真实账号 |
|
||||
| 单元测试(全部) | `./tests/scripts/run-unit-all.sh` | 不需要真实账号 |
|
||||
| Release 目标交叉编译 | `./tests/scripts/check-cross-build.sh` | 覆盖发布包支持的 GOOS/GOARCH |
|
||||
| 端到端测试 | `./tests/scripts/run-live.sh` | 使用真实账号执行全链路测试 |
|
||||
|
||||
端到端测试集会录制完整的请求/响应日志,用于故障排查。
|
||||
@@ -20,6 +21,26 @@ Node 单元测试脚本会先做 `node --check` 语法门禁,再以 `--test-co
|
||||
|
||||
---
|
||||
|
||||
## PR 门禁 | PR Gates
|
||||
|
||||
打开或更新 PR 前,按 `.github/workflows/quality-gates.yml` 的同等本地门禁执行:
|
||||
|
||||
```bash
|
||||
./scripts/lint.sh
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/run-unit-all.sh
|
||||
npm run build --prefix webui
|
||||
```
|
||||
|
||||
说明:
|
||||
|
||||
- `./scripts/lint.sh` 会运行 Go 格式化检查和 `golangci-lint`;修改 Go 文件后仍建议先执行 `gofmt -w <files>`。
|
||||
- `run-unit-all.sh` 串行调用 Go 与 Node 单元测试入口。
|
||||
- CI 还会额外在 macOS/Windows 跑 Go 单测,并执行 release 目标交叉编译检查。
|
||||
- `run-live.sh` 是真实账号端到端测试,适合作为发布或高风险改动后的补充验证,不属于每次 PR 的固定本地门禁。
|
||||
|
||||
---
|
||||
|
||||
## 快速开始 | Quick Start
|
||||
|
||||
### 单元测试 | Unit Tests
|
||||
@@ -38,11 +59,11 @@ Node 单元测试脚本会先做 `node --check` 语法门禁,再以 `--test-co
|
||||
# 结构与流程门禁
|
||||
./tests/scripts/check-refactor-line-gate.sh
|
||||
./tests/scripts/check-node-split-syntax.sh
|
||||
|
||||
# 发布阻断:阶段 6 手工烟测签字检查(默认读取 plans/stage6-manual-smoke.md)
|
||||
./tests/scripts/check-stage6-manual-smoke.sh
|
||||
./tests/scripts/check-cross-build.sh
|
||||
```
|
||||
|
||||
说明:`plans/stage6-manual-smoke.md` 已移除,阶段 6 手工烟测不再作为当前 CI 或发布门禁。
|
||||
|
||||
### 端到端测试 | End-to-End Tests
|
||||
|
||||
```bash
|
||||
@@ -190,8 +211,8 @@ go test -v -run TestParseToolCallsWithDeepSeekHallucination ./internal/toolcall/
|
||||
# 运行 format 相关测试
|
||||
go test -v ./internal/format/...
|
||||
|
||||
# 运行 adapter 相关测试
|
||||
go test -v ./internal/adapter/openai/...
|
||||
# 运行 HTTP API 相关测试
|
||||
go test -v ./internal/httpapi/openai/...
|
||||
```
|
||||
|
||||
### 调试 Tool Call 问题 | Debugging Tool Call Issues
|
||||
|
||||
119
docs/project-value.md
Normal file
119
docs/project-value.md
Normal file
@@ -0,0 +1,119 @@
|
||||
# DS2API 项目价值说明
|
||||
|
||||
文档导航:[总览](../README.MD) / [文档索引](./README.md) / [接口文档](../API.md) / [兼容主链路](./prompt-compatibility.md) / [Tool Calling 语义](./toolcall-semantics.md)
|
||||
|
||||
> 本文用于说明 DS2API 的项目定位与长期价值。
|
||||
> 它不是架构说明,也不是功能清单,而是从“网页能力如何稳定 API 化”这个角度解释本项目为什么成立。
|
||||
|
||||
## 1. 项目定位
|
||||
|
||||
DS2API 的定位不是“又一个 API 代理”,也不是训练工具。
|
||||
|
||||
它本质上是一个网页转 API 的兼容层:把 DeepSeek 网页对话侧可用的能力,整理成 OpenAI / Claude / Gemini 风格客户端可以接入的请求与响应形态。
|
||||
|
||||
本项目的核心价值在于:
|
||||
|
||||
1. 把 DeepSeek 网页对话能力 API 化。
|
||||
2. 把不同客户端协议统一到同一套兼容入口。
|
||||
3. 把网页侧会话、thinking、文件引用、流式输出等行为整理成客户端可消费的结果。
|
||||
4. 为上层编程工具、自动化工具或外部编排器提供稳定后端。
|
||||
|
||||
## 2. 解决的问题
|
||||
|
||||
### 2.1 把网页能力变成可接入的 API 形态
|
||||
|
||||
网页侧能力可以直接对话,但标准客户端需要的是稳定的 API 契约。两者之间有一段天然差距:
|
||||
|
||||
- 输入格式不同
|
||||
- 输出事件不同
|
||||
- 流式语义不同
|
||||
- 文件引用方式不同
|
||||
- thinking 与正文的暴露方式不同
|
||||
|
||||
DS2API 通过 `promptcompat`、`completionruntime`、`assistantturn` 和各协议 renderer,把这段差距收敛到一条可维护的主链路中:
|
||||
|
||||
- 请求侧把 OpenAI / Claude / Gemini 消息归一成网页纯文本上下文。
|
||||
- 上游侧按 DeepSeek 网页 completion 需要的 payload 发起会话。
|
||||
- 输出侧把 DeepSeek SSE 收集或流式事件再渲染回各协议原生形态。
|
||||
|
||||
这才是本项目的主定义:把网页能力稳定转成 API 可消费形态。
|
||||
|
||||
### 2.2 不只是转发,而是兼容
|
||||
|
||||
普通转发只能把请求送出去,无法处理协议语义之间的差异。DS2API 需要额外处理:
|
||||
|
||||
- 模型 alias 与 DeepSeek 原生模型的映射
|
||||
- thinking / reasoning 开关与输出结构
|
||||
- search 与 citation / reference 标记
|
||||
- 文件上传、历史文件和 current input file
|
||||
- 上游空输出、content filter、auto-continue、重试和 usage 估算
|
||||
|
||||
这些都不是“把 URL 改一下”能解决的事情。项目价值正是在这些细节里体现出来。
|
||||
|
||||
### 2.3 让外部工具链能挂上去
|
||||
|
||||
当用户把 DS2API 接到编程工具、自动化工具或第三方 SDK 时,很多请求会变成长链路任务:
|
||||
|
||||
- 读取文件
|
||||
- 搜索上下文
|
||||
- 修改代码
|
||||
- 执行命令
|
||||
- 继续修正
|
||||
- 输出最终结果
|
||||
|
||||
DS2API 不直接定义这些外部工具链,但它提供了一个足够稳定的 API 底座,让这些工具链可以外挂在上面继续工作。
|
||||
|
||||
## 3. 工具调用的价值
|
||||
|
||||
工具调用不是 DS2API 成立的前提,但它是项目很重要的增强能力。
|
||||
|
||||
即使没有工具调用,DS2API 仍然是网页转 API 兼容层;当请求包含工具能力时,项目会额外处理模型输出漂移、长参数和流式防泄漏等问题:
|
||||
|
||||
- 长脚本用 CDATA 保住原文
|
||||
- 文件路径和命令参数不容易被转义打坏
|
||||
- tool call 语法有统一的 DSML / canonical XML 处理
|
||||
- 模型输出漂了也能宽匹配、自修正
|
||||
- 流式场景能尽量不把工具块漏回普通文本
|
||||
|
||||
这使 DS2API 可以服务编程工具和 agent 类客户端,但项目主轴仍然是“网页能力 API 化”,不是把工具调用当作项目唯一卖点。
|
||||
|
||||
## 4. CDATA 的作用
|
||||
|
||||
CDATA 不是项目价值本身,但它是工具调用与长文本兼容中很实用的一部分。
|
||||
|
||||
对本项目这种场景来说,CDATA 的作用很直接:
|
||||
|
||||
- 保护长文本不被转义破坏
|
||||
- 保住脚本、命令、代码片段的原样性
|
||||
- 让结构化参数和自由文本更稳定地共存
|
||||
- 让历史内容更容易被原样回放和再处理
|
||||
|
||||
它的意义不是让协议显得更复杂,而是让内容更少在转写、解析和回放过程中坏掉。
|
||||
|
||||
## 5. 它不是什么
|
||||
|
||||
为了避免误解,需要明确项目边界:
|
||||
|
||||
- 不是官方 DeepSeek API。
|
||||
- 不是训练平台。
|
||||
- 不是人工标注系统。
|
||||
- 不是独立评测工具。
|
||||
- 不是简单反代。
|
||||
|
||||
DS2API 是兼容层。它的职责是把网页能力整理成 API 体验,并在必要时对工具、历史、文件和流式输出做兼容处理。
|
||||
|
||||
## 6. 长期价值
|
||||
|
||||
DS2API 的长期价值,不在某个单点功能,而在于它把多个难点放进了同一条可维护链路:
|
||||
|
||||
- 多协议入口
|
||||
- DeepSeek 网页 completion 适配
|
||||
- prompt 纯文本兼容
|
||||
- thinking / search / file 引用处理
|
||||
- Go / Node 流式输出对齐
|
||||
- tool call 解析与防泄漏
|
||||
- Admin / WebUI / 账号池 / 并发队列
|
||||
|
||||
如果要用一句话概括它的价值,可以写成:
|
||||
|
||||
**DS2API 的价值,是把 DeepSeek 网页能力稳定整理成标准客户端可以持续使用的 API 形态。**
|
||||
436
docs/prompt-compatibility.md
Normal file
436
docs/prompt-compatibility.md
Normal file
@@ -0,0 +1,436 @@
|
||||
# API -> 网页对话纯文本兼容主链路说明
|
||||
|
||||
文档导航:[总览](../README.MD) / [架构说明](./ARCHITECTURE.md) / [接口文档](../API.md) / [测试指南](./TESTING.md)
|
||||
|
||||
> 本文档是 DS2API“把 OpenAI / Claude / Gemini 风格 API 请求兼容成 DeepSeek 网页对话纯文本上下文”的专项说明。
|
||||
> 这是项目最重要的兼容产物之一。凡是修改消息标准化、tool prompt 注入、tool history 保留、文件引用、current input file、下游 completion payload 组装等行为,都必须同步更新本文档。
|
||||
|
||||
## 1. 核心结论
|
||||
|
||||
DS2API 当前的核心思路,不是把客户端传来的 `messages`、`tools`、`attachments` 原样转发给下游。
|
||||
|
||||
而是把这些高层 API 语义,统一压缩成 DeepSeek 网页对话更容易理解的三类输入:
|
||||
|
||||
1. `prompt`
|
||||
一个单字符串,里面带有角色标记、system 指令、历史消息、assistant reasoning 标签、历史 tool call XML 等。
|
||||
2. `ref_file_ids`
|
||||
一个文件引用数组,承载附件、inline 上传文件,以及必要时被拆出去的历史文件。
|
||||
3. 控制位
|
||||
例如 `thinking_enabled`、`search_enabled`、部分 passthrough 参数。
|
||||
|
||||
也就是说,项目最重要的兼容动作,是把“结构化 API 会话”翻译成“网页对话纯文本上下文 + 文件引用”。
|
||||
|
||||
## 2. 为什么这是核心产物
|
||||
|
||||
因为对下游来说,真正稳定的输入面不是 OpenAI/Claude/Gemini 的原生 schema,而是:
|
||||
|
||||
- 一段连续的对话 prompt
|
||||
- 一组可引用文件
|
||||
- 少量开关位
|
||||
|
||||
这也是为什么很多表面上看像“协议兼容”的代码,最终都会收敛到同一类逻辑:
|
||||
|
||||
- 先把不同协议的消息统一成内部消息序列
|
||||
- 再把工具声明改写成 system prompt 文本
|
||||
- 再把历史 tool call / tool result 改写成 prompt 可见内容
|
||||
- 最后输出成 DeepSeek completion payload
|
||||
|
||||
## 3. 统一心智模型
|
||||
|
||||
当前主链路可以这样理解:
|
||||
|
||||
```text
|
||||
客户端请求
|
||||
-> HTTP API surface(OpenAI / Claude / Gemini)
|
||||
-> promptcompat 统一消息标准化
|
||||
-> tool prompt 注入
|
||||
-> DeepSeek 风格 prompt 拼装
|
||||
-> 文件收集 / inline 上传(OpenAI 文件链路)
|
||||
-> current input file(completion runtime 全局入口)
|
||||
-> completion payload
|
||||
-> 下游网页对话接口
|
||||
-> assistantturn 输出语义归一(Go 非流式 + 流式收尾)
|
||||
-> 各协议 renderer(OpenAI / Responses / Claude / Gemini)
|
||||
```
|
||||
|
||||
对应的关键代码入口:
|
||||
|
||||
- OpenAI Chat / Responses:
|
||||
[internal/promptcompat/request_normalize.go](../internal/promptcompat/request_normalize.go)
|
||||
- OpenAI prompt 组装:
|
||||
[internal/promptcompat/prompt_build.go](../internal/promptcompat/prompt_build.go)
|
||||
- OpenAI 消息标准化:
|
||||
[internal/promptcompat/message_normalize.go](../internal/promptcompat/message_normalize.go)
|
||||
- Claude 标准化:
|
||||
[internal/httpapi/claude/standard_request.go](../internal/httpapi/claude/standard_request.go)
|
||||
- Claude 消息与 tool_use/tool_result 归一:
|
||||
[internal/httpapi/claude/handler_utils.go](../internal/httpapi/claude/handler_utils.go)
|
||||
- Gemini 复用 OpenAI prompt builder:
|
||||
[internal/httpapi/gemini/convert_request.go](../internal/httpapi/gemini/convert_request.go)
|
||||
- DeepSeek prompt 角色标记拼装:
|
||||
[internal/prompt/messages.go](../internal/prompt/messages.go)
|
||||
- prompt 可见 tool history XML:
|
||||
[internal/prompt/tool_calls.go](../internal/prompt/tool_calls.go)
|
||||
- 最新 user 思考格式注入:
|
||||
[internal/promptcompat/thinking_injection.go](../internal/promptcompat/thinking_injection.go)
|
||||
- completion payload:
|
||||
[internal/promptcompat/standard_request.go](../internal/promptcompat/standard_request.go)
|
||||
- Go 输出侧 assistant turn:
|
||||
[internal/assistantturn/turn.go](../internal/assistantturn/turn.go)
|
||||
- Go completion runtime:
|
||||
[internal/completionruntime/nonstream.go](../internal/completionruntime/nonstream.go)
|
||||
|
||||
## 4. 下游真正收到的东西
|
||||
|
||||
在“完成标准化后”,下游 completion payload 的核心形态是:
|
||||
|
||||
```json
|
||||
{
|
||||
"chat_session_id": "session-id",
|
||||
"model_type": "default",
|
||||
"parent_message_id": null,
|
||||
"prompt": "<|begin▁of▁sentence|>...",
|
||||
"ref_file_ids": [
|
||||
"file-history",
|
||||
"file-systemprompt",
|
||||
"file-other-attachment"
|
||||
],
|
||||
"thinking_enabled": true,
|
||||
"search_enabled": false
|
||||
}
|
||||
```
|
||||
|
||||
重点是:
|
||||
|
||||
- `prompt` 才是对话上下文主载体。
|
||||
- `ref_file_ids` 只承载文件引用,不承载普通文本消息。
|
||||
- `tools` 不会作为“原生工具 schema”直接下发给下游,而是被改写进 `prompt`。
|
||||
- 对外返回给客户端的 `prompt_tokens` / `input_tokens` / `promptTokenCount` 不再按“最后一条消息”或字符粗估近似返回,而是基于**完整上下文 prompt**做 tokenizer 计数;为了避免上下文实际超限但客户端误以为还能塞下,请求侧上下文 token 会额外保守上浮一点,宁可略大也不低估。
|
||||
- 当前 `/v1/chat/completions` 业务路径仍是“每次请求新建一个远端 `chat_session_id`,并默认发送 `parent_message_id: null`”;因此 DS2API 对外默认表现为“新会话 + prompt 拼历史”,而不是复用 DeepSeek 原生会话树。
|
||||
- 但 DeepSeek 远端本身支持同一 `chat_session_id` 的跨轮次持续对话。2026-04-27 已用项目内现有 DeepSeek client 做过一次不改业务代码的双轮实测:同一 `chat_session_id` 下,第 1 轮返回 `request_message_id=1` / `response_message_id=2` / 文本 `SESSION_TEST_ONE`;第 2 轮重新获取一次 PoW,并发送 `parent_message_id=2` 后,成功返回 `request_message_id=3` / `response_message_id=4` / 文本 `SESSION_TEST_TWO`。这说明“同远端会话持续聊天”能力存在,且每轮需要携带正确的 parent/message 链接信息,同时重新获取对应轮次可用的 PoW。
|
||||
- OpenAI Chat / Responses 原生走统一 OpenAI 标准化与 DeepSeek payload 组装;Claude / Gemini 会尽量复用 OpenAI prompt/tool 语义,其中 Gemini 直接复用 `promptcompat.BuildOpenAIPromptForAdapter`。Go 主服务新增 `completionruntime` 启动层,统一执行 DeepSeek session/PoW/call;输出侧新增 `assistantturn` 语义层:非流式 OpenAI Chat / Responses / Claude / Gemini 会把 DeepSeek SSE 收集结果先归一成同一份 assistant turn,再分别渲染成各协议原生外形;流式 OpenAI Chat / Responses / Claude / Gemini 继续保持各协议实时 SSE framing,但最终收尾的 tool fallback、schema 归一、usage、empty-output / content-filter 错误语义同样由 `assistantturn` 判定。Claude / Gemini 的常规 Go 主路径不再依赖内部 `httptest` 转发到 OpenAI handler;`translatorcliproxy` 仅保留用于 Vercel bridge、后端缺失 fallback 和回归测试,不作为主业务协议转换中心。
|
||||
- Vercel Node 流式路径本轮不迁移,仍使用现有 Node bridge / stream-tool-sieve 实现;后续若变更 Node 流式语义,需要按 `assistantturn` 的 Go canonical 输出语义同步对齐。
|
||||
- 客户端传入的 thinking / reasoning 开关会被归一到下游 `thinking_enabled`。Gemini `generationConfig.thinkingConfig.thinkingBudget` 会翻译成同一套 thinking 开关;关闭时即使上游返回 `response/thinking_content`,兼容层也不会把它当作可见正文输出。若最终解析出的模型名带 `-nothinking` 后缀,则会无条件强制关闭 thinking,优先级高于请求体中的 `thinking` / `reasoning` / `reasoning_effort`。未显式关闭时,各 surface 会按解析后的 DeepSeek 模型默认能力开启 thinking,并用各自协议的原生形态暴露:OpenAI Chat 为 `reasoning_content`,OpenAI Responses 为 `response.reasoning.delta` / `reasoning` content,Claude 为 `thinking` block / `thinking_delta`,Gemini 为 `thought: true` part。
|
||||
- 对 OpenAI Chat / Responses 的非流式收尾,如果最终可见正文为空,兼容层会优先尝试把思维链中的独立 DSML / XML 工具块当作真实工具调用解析出来。流式链路也会在收尾阶段做同样的 fallback 检测,但不会因为思维链内容去中途拦截或改写流式输出;真正的工具识别始终基于原始上游文本,而不是基于“已经做过可见输出清洗”的版本,因此即使最终可见层会剥离完整 leaked DSML / XML `tool_calls` wrapper、并抑制全空参数或无效 wrapper 块,也不会影响真实工具调用转成结构化 `tool_calls` / `function_call`。补发结果会作为本轮 assistant 的结构化 `tool_calls` / `function_call` 输出返回,而不是塞进 `content` 文本;如果客户端没有开启 thinking / reasoning,思维链只用于检测,不会作为 `reasoning_content` 或可见正文暴露。只有正文为空且思维链里也没有可执行工具调用时,才继续按空回复错误处理。
|
||||
- OpenAI Chat / Responses 的空回复错误处理之前会默认做一次内部补偿重试:第一次上游完整结束后,如果最终可见正文为空、没有解析到工具调用、也没有已经向客户端流式发出工具调用,并且终止原因不是 `content_filter`,兼容层会复用同一个 `chat_session_id`、账号、token 与工具策略,把原始 completion `prompt` 追加固定后缀 `Previous reply had no visible output. Please regenerate the visible final answer or tool call now.` 后重新提交一次。重试遵循 DeepSeek 多轮对话协议:从第一次上游 SSE 流中提取 `response_message_id`,并在重试 payload 中设置 `parent_message_id` 为该值,使重试成为同一会话的后续轮次而非断裂的根消息;同时重新获取一次 PoW(若 PoW 获取失败则回退到原始 PoW)。该重试不会重新标准化消息、不会新建 session、不会切换账号,也不会向流式客户端插入重试标记;第二次 thinking / reasoning 会按正常增量直接接到第一次之后,并继续使用 overlap trim 去重。若第二次仍为空,终端错误码仍保持现有 `upstream_empty_output`;若任一尝试触发空 `content_filter`,不做补偿重试并保持 `content_filter` 错误。JS Vercel 运行时同样设置 `parent_message_id`,但因无法直接调用 PoW API 而复用原始 PoW。
|
||||
|
||||
- OpenAI Chat / Responses 在最终可见正文渲染阶段,会把 DeepSeek 搜索返回中的 `[citation:N]` / `[reference:N]` 标记替换成对应 Markdown 链接。`citation` 标记按一基序号解析;`reference` 标记只有在同一段正文中出现 `[reference:0]`(允许冒号后有空格)时才按零基序号映射,并且不会影响同段正文里的 `citation` 标记。
|
||||
|
||||
## 5. prompt 是怎么拼出来的
|
||||
|
||||
OpenAI Chat / Responses 在标准化后、current input file 之前,会默认执行 `thinking_injection` 增强。它参考 DeepSeek V4 “把控制指令放在 user 消息末尾更稳定”的用法,在最新 user message 后追加思考增强提示词。当前内置默认提示词以 `Reasoning Effort: Absolute maximum with no shortcuts permitted.` 开头,并继续要求模型充分分解问题、覆盖潜在路径与边界条件、把完整推演过程显式写出。该开关默认启用,可通过 `thinking_injection.enabled=false` 关闭;也可以通过 `thinking_injection.prompt` 自定义提示词,留空时使用内置默认提示词。
|
||||
|
||||
这段增强属于 prompt 可见上下文:
|
||||
|
||||
- 普通请求会直接出现在最终 `prompt` 的最新 user block 末尾。
|
||||
- 如果触发 current input file,它会进入完整上下文文件中。
|
||||
|
||||
另外,`MessagesPrepareWithThinking` 还会在最终 prompt 的最前面预置一段固定的 system 级“输出完整性约束(Output integrity guard)”:
|
||||
|
||||
- 如果上游上下文、工具输出或解析后的文本出现乱码、损坏、部分解析、重复或其他畸形片段,不要模仿、不要回显,只输出给用户的正确内容。
|
||||
- 这段约束位于普通 system / tool prompt 之前,因此是当前最终 prompt 里的最高优先级前置指令。
|
||||
|
||||
### 5.1 角色标记
|
||||
|
||||
最终 prompt 使用 DeepSeek 风格角色标记:
|
||||
|
||||
- `<|begin▁of▁sentence|>`
|
||||
- `<|System|>`
|
||||
- `<|User|>`
|
||||
- `<|Assistant|>`
|
||||
- `<|Tool|>`
|
||||
- `<|end▁of▁instructions|>`
|
||||
- `<|end▁of▁sentence|>`
|
||||
- `<|end▁of▁toolresults|>`
|
||||
|
||||
实现位置:
|
||||
[internal/prompt/messages.go](../internal/prompt/messages.go)
|
||||
|
||||
### 5.2 相邻同角色消息会合并
|
||||
|
||||
在最终 `MessagesPrepareWithThinking` 中,相邻同 role 的消息会被合并成一个块,中间插入空行。
|
||||
|
||||
这意味着:
|
||||
|
||||
- prompt 中看到的是“合并后的 role block”
|
||||
- 不是客户端传来的逐条 message 原样排列
|
||||
|
||||
## 6. tools 为什么是“文本注入”,不是原生下发
|
||||
|
||||
当前项目把工具能力视为“prompt 约束的一部分”。
|
||||
|
||||
具体做法:
|
||||
|
||||
1. 把每个 tool 的名称、描述、参数 schema 序列化成文本。
|
||||
2. 拼成 `You have access to these tools:` 大段说明。
|
||||
3. 再附上统一的 DSML tool call 外壳格式约束。
|
||||
4. 把这整段内容并入 system prompt。
|
||||
|
||||
工具调用正例现在优先示范官方 DSML 风格:`<|DSML|tool_calls>` → `<|DSML|invoke name="...">` → `<|DSML|parameter name="...">`。
|
||||
兼容层仍接受旧式纯 `<tool_calls>` wrapper,并会容错若干 DSML 标签变体,包括短横线形式 `<dsml-tool-calls>` / `<dsml-invoke>` / `<dsml-parameter>`;但提示词会优先要求模型输出官方 DSML 标签,并强调不能只输出 closing wrapper 而漏掉 opening tag。需要注意:这是“兼容 DSML 外壳,内部仍以 XML 解析语义为准”,不是原生 DSML 全链路实现;DSML 标签会在解析入口归一化回现有 XML 标签后继续走同一套 parser。
|
||||
数组参数使用 `<item>...</item>` 子节点表示;当某个参数体只包含 item 子节点时,Go / Node 解析器会把它还原成数组,避免 `questions` / `options` 这类 schema 中要求 array 的参数被误解析成 `{ "item": ... }` 对象。除此之外,解析器还会回收一些更松散的列表写法,例如 JSON array 字面量或逗号分隔的 JSON 项序列,只要它们足够明确;但 `<item>` 仍然是首选形态。若模型把完整结构化 XML fragment 误包进 CDATA,兼容层会在保护 `content` / `command` 等原文字段的前提下,尝试把非原文字段中的 CDATA XML fragment 还原成 object / array。不过,如果 CDATA 只是单个平面的 XML/HTML 标签,例如 `<b>urgent</b>` 这种行内标记,兼容层会保留原始字符串,不会强行升成 object / array;只有明显表示结构的 CDATA 片段,例如多兄弟节点、嵌套子节点或 `item` 列表,才会触发结构化恢复。对 `command` / `content` 等长文本参数,CDATA 内部的 Markdown fenced DSML / XML 示例会作为原文保护;示例里的 `]]></parameter>` 或 `</tool_calls>` 不会截断外层工具调用,解析器会继续等待围栏外真正的参数 / wrapper 结束标签。
|
||||
Go 侧读取 DeepSeek SSE 时不再依赖 `bufio.Scanner` 的固定 2MiB 单行上限;当写文件类工具把很长的 `content` 放在单个 `data:` 行里返回时,非流式收集、流式解析和 auto-continue 透传都会保留完整行,再进入同一套工具解析与序列化流程。
|
||||
在 assistant 最终回包阶段,如果某个 tool 参数在声明 schema 中明确是 `string`,兼容层会在把解析后的 `tool_calls` / `function_call` 重新序列化成 OpenAI / Responses / Claude 可见参数前,递归把该路径上的 number / bool / object / array 统一转成字符串;其中 object / array 会压成紧凑 JSON 字符串。这个保护只对 schema 明确声明为 string 的路径生效,不会改写本来就是 `number` / `boolean` / `object` / `array` 的参数。这样可以兼容 DeepSeek 输出了结构化片段、但上游客户端工具 schema 又严格要求字符串参数的场景(例如 `content`、`prompt`、`path`、`taskId` 等)。
|
||||
工具 schema 的权威来源始终是**当前请求实际携带的 schema**,而不是同名工具在其他 runtime(Claude Code / OpenCode / Codex 等)里的默认印象。兼容层现在会同时兼容 OpenAI 风格 `function.parameters`、直接工具对象上的 `parameters` / `input_schema`、以及 camelCase 的 `inputSchema` / `schema`,并在最终输出阶段按这份请求内 schema 决定是保留 array/object,还是仅对明确声明为 `string` 的路径做字符串化。该规则同样适用于 Claude 的流式收尾和 Vercel Node 流式 tool-call formatter,避免不同 runtime 因 schema shape 差异而出现同名工具参数类型漂移。
|
||||
正例中的工具名只会来自当前请求实际声明的工具;如果当前请求没有足够的已知工具形态,就省略对应的单工具、多工具或嵌套示例,避免把不可用工具名写进 prompt。
|
||||
对执行类工具,脚本内容必须进入执行参数本身:`Bash` / `execute_command` 使用 `command`,`exec_command` 使用 `cmd`;不要把脚本示范成 `path` / `content` 文件写入参数。
|
||||
如果当前请求声明了 `Read` / `read_file` 这类读取工具,兼容层会额外注入一条 read-tool cache guard:当读取结果只表示“文件未变更 / 已在历史中 / 请引用先前上下文 / 没有正文内容”时,模型必须把它视为内容不可用,不能反复调用同一个无正文读取;应改为请求完整正文读取能力,或向用户说明需要重新提供文件内容。这个约束只缓解客户端缓存返回空内容导致的死循环,DS2API 不会也无法凭空恢复客户端本地文件正文。
|
||||
|
||||
OpenAI 路径实现:
|
||||
[internal/promptcompat/tool_prompt.go](../internal/promptcompat/tool_prompt.go)
|
||||
|
||||
Claude 路径实现:
|
||||
[internal/httpapi/claude/handler_utils.go](../internal/httpapi/claude/handler_utils.go)
|
||||
|
||||
统一工具调用格式模板:
|
||||
[internal/toolcall/tool_prompt.go](../internal/toolcall/tool_prompt.go)
|
||||
|
||||
这也是项目“网页对话纯文本兼容”的关键设计:
|
||||
|
||||
- tools 对下游来说,本质上是 prompt 内规则
|
||||
- 不是 native tool schema transport
|
||||
|
||||
## 7. assistant 的 tool_calls / reasoning 如何保留
|
||||
|
||||
### 7.1 reasoning 保留方式
|
||||
|
||||
assistant 的 reasoning 会变成一个显式标签块:
|
||||
|
||||
```text
|
||||
[reasoning_content]
|
||||
...
|
||||
[/reasoning_content]
|
||||
```
|
||||
|
||||
然后再接可见回答正文。
|
||||
|
||||
### 7.2 历史 tool_calls 保留方式
|
||||
|
||||
assistant 历史 `tool_calls` 不会保留成 OpenAI 原生 JSON,而会转成 prompt 可见的 DSML 外壳:
|
||||
|
||||
```xml
|
||||
<|DSML|tool_calls>
|
||||
<|DSML|invoke name="read_file">
|
||||
<|DSML|parameter name="path"><![CDATA[src/main.go]]></|DSML|parameter>
|
||||
</|DSML|invoke>
|
||||
</|DSML|tool_calls>
|
||||
```
|
||||
|
||||
解析层同时兼容旧式纯 XML 形态:`<tool_calls>` / `<invoke>` / `<parameter>`。两者都会先归一到现有 XML 解析语义;其他旧格式都会作为普通文本保留,不会作为可执行调用语法。
|
||||
例外是 parser 会对一个非常窄的模型失误做修复:如果 assistant 输出了 `<invoke ...>` ... `</tool_calls>`(或 DSML 对应标签),但漏掉最前面的 opening wrapper,解析阶段会补回 wrapper 后再尝试识别。
|
||||
|
||||
这件事很重要,因为它决定了:
|
||||
|
||||
- 历史工具调用在 prompt 中是“可见文本历史”
|
||||
- 不是“隐藏结构化元数据”
|
||||
|
||||
实现位置:
|
||||
[internal/prompt/tool_calls.go](../internal/prompt/tool_calls.go)
|
||||
|
||||
### 7.3 tool result 保留方式
|
||||
|
||||
tool / function role 的结果会作为 `<|Tool|>...<|end▁of▁toolresults|>` 进入 prompt。
|
||||
|
||||
如果 tool content 为空,当前会补成字符串 `"null"`,避免整个 tool turn 丢失。
|
||||
|
||||
## 8. files、附件、systemprompt 文件的实际语义
|
||||
|
||||
这里要明确区分两类东西:
|
||||
|
||||
1. 文本型 system prompt
|
||||
例如 OpenAI `developer` / `system` / Responses `instructions` / Claude top-level `system`
|
||||
这类会进入 `prompt`。
|
||||
2. 文件型 systemprompt
|
||||
例如通过附件、`input_file`、base64、data URL 上传的文件
|
||||
这类不会直接内联进 `prompt`,而是进入 `ref_file_ids`。
|
||||
|
||||
OpenAI 文件相关实现:
|
||||
|
||||
- inline/base64/data URL 上传:
|
||||
[internal/httpapi/openai/files/file_inline_upload.go](../internal/httpapi/openai/files/file_inline_upload.go)
|
||||
- 文件 ID 收集:
|
||||
[internal/promptcompat/file_refs.go](../internal/promptcompat/file_refs.go)
|
||||
|
||||
OpenAI 的文件上传现在不再是“只传文件本体”的通用路径,而是会先根据请求里的 `model` 解析出 DeepSeek 的上传类型,并把它透传到上传接口的 `x-model-type`。当前可见的上传类型就是 `default` / `expert` / `vision`,其中 vision 请求上传图片时必须带上 `vision`,否则下游容易退回到仅文本或 OCR 语义。这个模型类型会同时用于:
|
||||
|
||||
- `/v1/files` 这类独立文件上传入口
|
||||
- Chat / Responses 的 inline 图片、附件上传
|
||||
- current input file 触发时生成的 `DS2API_HISTORY.txt` 上下文文件
|
||||
|
||||
也就是说,文件上传和完成请求的 `model_type` 现在是一致的:完成 payload 里仍然是 `model_type`,上传文件则会在 DeepSeek 上传阶段携带同样的模型类型信息。
|
||||
|
||||
结论:
|
||||
|
||||
- “systemprompt 文字”在 prompt 里
|
||||
- “systemprompt 文件”通常只在 `ref_file_ids` 里
|
||||
|
||||
除非调用方自己把文件内容展开后再塞进 system/developer 文本,否则文件内容不会自动出现在 prompt 正文。
|
||||
|
||||
## 9. 多轮历史为什么不会一直完整内联在 prompt
|
||||
|
||||
兼容层现在只保留 `current_input_file` 这一种拆分方式;旧的 `history_split` 配置字段已移除,读取旧配置时会忽略它且不会再写回。
|
||||
|
||||
- `current_input_file` 默认开启;它在统一 completion runtime 入口全局生效,用于把“完整上下文”合并进 `DS2API_HISTORY.txt` 上下文文件。当最新 user turn 的纯文本长度达到 `current_input_file.min_chars`(默认 `0`)时,runtime 会上传一个文件名为 `DS2API_HISTORY.txt` 的上下文文件。文件内容会先经过各协议入口的标准化,再序列化成按轮次编号的 `DS2API_HISTORY.txt` 风格 transcript,带有 `# DS2API_HISTORY.txt` 标题和 `=== N. ROLE ===` 分段;live prompt 中则会给出一个 continuation 语气的 user 消息,引导模型从 `DS2API_HISTORY.txt` 的最新状态继续推进,并直接回答最新请求,避免把任务拉回起点。
|
||||
- 如果 `current_input_file.enabled=false`,请求会直接透传,不上传任何拆分上下文文件。
|
||||
- 即使触发 `current_input_file` 后 live prompt 被缩短,对客户端回包里的上下文 token 统计,仍会沿用**拆分前的完整 prompt 语义**做计数,而不是按缩短后的占位 prompt 计算;否则会把真实上下文显著算小。
|
||||
|
||||
相关实现:
|
||||
|
||||
- 配置访问器:
|
||||
[internal/config/store_accessors.go](../internal/config/store_accessors.go)
|
||||
- 当前输入转文件:
|
||||
[internal/httpapi/openai/history/current_input_file.go](../internal/httpapi/openai/history/current_input_file.go)
|
||||
- 全局 completion runtime 应用点:
|
||||
[internal/completionruntime/nonstream.go](../internal/completionruntime/nonstream.go)
|
||||
|
||||
当前输入转文件启用并触发时,上传文件的真实文件名是 `DS2API_HISTORY.txt`,文件内容是完整 `messages` 上下文;它会使用 OpenAI-compatible 的消息/transcript 序列化规则和 DeepSeek 角色标记,再按轮次编号成 `DS2API_HISTORY.txt` 风格的 transcript(不再注入文件边界标签):
|
||||
|
||||
```text
|
||||
[uploaded filename]: DS2API_HISTORY.txt
|
||||
# DS2API_HISTORY.txt
|
||||
Prior conversation history and tool progress.
|
||||
|
||||
=== 1. SYSTEM ===
|
||||
...
|
||||
|
||||
=== 2. USER ===
|
||||
...
|
||||
|
||||
=== 3. ASSISTANT ===
|
||||
...
|
||||
|
||||
=== 4. TOOL ===
|
||||
...
|
||||
```
|
||||
|
||||
开启后,请求的 live prompt 不再直接内联完整上下文,而是保留一个 user role 的短提示,提示模型基于已提供上下文直接回答最新请求;上传后的 `file_id` 会进入 `ref_file_ids`。
|
||||
|
||||
## 10. 各协议入口的差异
|
||||
|
||||
### 10.1 OpenAI Chat / Responses
|
||||
|
||||
特点:
|
||||
|
||||
- `developer` 会映射到 `system`
|
||||
- Responses `instructions` 会 prepend 为 system message
|
||||
- `tools` 会注入 system prompt
|
||||
- `attachments` / `input_file` / inline 文件会进入 `ref_file_ids`
|
||||
- current input file 在统一 completion runtime 入口全局生效
|
||||
|
||||
### 10.2 Claude Messages
|
||||
|
||||
特点:
|
||||
|
||||
- top-level `system` 优先作为系统提示
|
||||
- `tool_use` / `tool_result` 会被转换成统一的 assistant/tool 历史语义
|
||||
- `tools` 同样会被并进 system prompt
|
||||
- 常规执行通过 `internal/httpapi/claude/handler_messages.go` 转到 OpenAI chat 路径,模型 alias 会先解析成 DeepSeek 原生模型
|
||||
- 当前代码里没有像 OpenAI 那样完整的 `ref_file_ids` 附件链路
|
||||
|
||||
### 10.3 Gemini
|
||||
|
||||
特点:
|
||||
|
||||
- `systemInstruction`、`contents.parts`、`functionCall`、`functionResponse` 会先归一
|
||||
- tools 会转成 OpenAI 风格 function schema
|
||||
- prompt 构建复用 OpenAI 的 `promptcompat.BuildOpenAIPromptForAdapter`
|
||||
- 未识别的非文本 part 会被安全序列化进 prompt,并对二进制/疑似 base64 内容做省略或截断处理
|
||||
|
||||
也就是说,Gemini 在“最终 prompt 语义”上,尽量和 OpenAI 保持一致。
|
||||
|
||||
## 11. 一份贴近真实的最终上下文示意
|
||||
|
||||
假设用户发来一个多轮请求:
|
||||
|
||||
- 有 system/developer 文本
|
||||
- 有 tools
|
||||
- 有一个文件型 systemprompt 附件
|
||||
- 有历史 assistant tool call / tool result
|
||||
- current input file 已触发
|
||||
|
||||
那么最终上下文更接近:
|
||||
|
||||
```json
|
||||
{
|
||||
"prompt": "<|begin▁of▁sentence|><|System|>原 system / developer\n\nYou have access to these tools: ...<|end▁of▁instructions|><|User|>Continue from the latest state in the attached DS2API_HISTORY.txt context. Treat it as the current working state and answer the latest user request directly.<|Assistant|>",
|
||||
"ref_file_ids": [
|
||||
"file-current-input-ignore",
|
||||
"file-systemprompt",
|
||||
"file-other-attachment"
|
||||
],
|
||||
"thinking_enabled": true,
|
||||
"search_enabled": false
|
||||
}
|
||||
```
|
||||
|
||||
这正是“API 转网页对话纯文本”的核心成果:
|
||||
|
||||
- 大部分结构化语义被压进 `prompt`
|
||||
- 文件保持文件
|
||||
- 需要时把完整上下文拆进 `DS2API_HISTORY.txt` 上下文文件,并按轮次编号成 transcript
|
||||
|
||||
## 12. 修改时必须同步本文档的场景
|
||||
|
||||
只要触碰以下任一类行为,就必须在同一提交或同一 PR 中更新本文档:
|
||||
|
||||
- 角色映射变更
|
||||
- system / developer / instructions 合并规则变更
|
||||
- assistant reasoning 保留格式变更
|
||||
- assistant 历史 `tool_calls` 的 XML 呈现方式变更
|
||||
- tool result 注入方式变更
|
||||
- tool prompt 模板或 tool_choice 约束变更
|
||||
- inline 文件上传 / 文件引用收集规则变更
|
||||
- current input file 触发条件、上传格式、`DS2API_HISTORY.txt` transcript 结构变更
|
||||
- 旧 `history_split` 字段忽略/清理行为变更
|
||||
- completion payload 字段语义变更
|
||||
- Claude / Gemini 对这套统一语义的复用关系变更
|
||||
|
||||
优先检查这些文件:
|
||||
|
||||
- `internal/promptcompat/request_normalize.go`
|
||||
- `internal/promptcompat/prompt_build.go`
|
||||
- `internal/promptcompat/message_normalize.go`
|
||||
- `internal/promptcompat/tool_prompt.go`
|
||||
- `internal/httpapi/openai/files/file_inline_upload.go`
|
||||
- `internal/promptcompat/file_refs.go`
|
||||
- `internal/httpapi/openai/history/current_input_file.go`
|
||||
- `internal/completionruntime/nonstream.go`
|
||||
- `internal/promptcompat/responses_input_normalize.go`
|
||||
- `internal/httpapi/claude/standard_request.go`
|
||||
- `internal/httpapi/claude/handler_utils.go`
|
||||
- `internal/httpapi/gemini/convert_request.go`
|
||||
- `internal/httpapi/gemini/convert_messages.go`
|
||||
- `internal/httpapi/gemini/convert_tools.go`
|
||||
- `internal/prompt/messages.go`
|
||||
- `internal/prompt/tool_calls.go`
|
||||
- `internal/promptcompat/standard_request.go`
|
||||
|
||||
## 13. 建议的最小验证
|
||||
|
||||
改动这条链路后,至少补齐或检查这些测试:
|
||||
|
||||
- `go test ./internal/prompt/...`
|
||||
- `go test ./internal/httpapi/openai/...`
|
||||
- `go test ./internal/httpapi/claude/...`
|
||||
- `go test ./internal/httpapi/gemini/...`
|
||||
- `go test ./internal/util/...`
|
||||
|
||||
如果改的是 tool call 相关兼容语义,还应同时检查:
|
||||
|
||||
- `go test ./internal/toolcall/...`
|
||||
- `node --test tests/node/stream-tool-sieve.test.js`
|
||||
|
||||
## 14. 文档同步约定
|
||||
|
||||
本文档是这条兼容链路的专项说明。
|
||||
|
||||
如果外部接口行为也变了,还应同步检查:
|
||||
|
||||
- [API.md](../API.md)
|
||||
- [API.en.md](../API.en.md)
|
||||
- [docs/toolcall-semantics.md](./toolcall-semantics.md)
|
||||
|
||||
原则是:
|
||||
|
||||
- 内部主链路变化,至少更新本文档
|
||||
- 外部可见契约变化,再同步更新 API 文档
|
||||
@@ -1,74 +1,106 @@
|
||||
# Tool call parsing semantics(Go/Node 统一语义)
|
||||
|
||||
本文档描述当前代码中工具调用解析链路的**实际行为**(以 `internal/toolcall` 与 `internal/js/helpers/stream-tool-sieve` 为准)。
|
||||
本文档描述当前代码中的**实际行为**,以 `internal/toolcall`、`internal/toolstream` 与 `internal/js/helpers/stream-tool-sieve` 为准。
|
||||
|
||||
文档导航:[总览](../README.MD) / [架构说明](./ARCHITECTURE.md) / [测试指南](./TESTING.md)
|
||||
|
||||
## 1) 当前输出结构
|
||||
## 1) 当前可执行格式
|
||||
|
||||
当前版本推荐模型输出 DSML 外壳:
|
||||
|
||||
```xml
|
||||
<|DSML|tool_calls>
|
||||
<|DSML|invoke name="read_file">
|
||||
<|DSML|parameter name="path"><![CDATA[README.MD]]></|DSML|parameter>
|
||||
</|DSML|invoke>
|
||||
</|DSML|tool_calls>
|
||||
```
|
||||
|
||||
兼容层仍接受旧式 canonical XML:
|
||||
|
||||
```xml
|
||||
<tool_calls>
|
||||
<invoke name="read_file">
|
||||
<parameter name="path"><![CDATA[README.MD]]></parameter>
|
||||
</invoke>
|
||||
</tool_calls>
|
||||
```
|
||||
|
||||
这不是原生 DSML 全链路实现。DSML 主要用于让模型有意识地输出协议标识,隔离普通 XML 语义;进入 parser 前会按固定本地标签名归一化成 `<tool_calls>` / `<invoke>` / `<parameter>`,内部仍以现有 XML 解析语义为准。
|
||||
|
||||
约束:
|
||||
|
||||
- 必须有 `<|DSML|tool_calls>...</|DSML|tool_calls>` 或 `<tool_calls>...</tool_calls>` wrapper
|
||||
- 每个调用必须在 `<|DSML|invoke name="...">...</|DSML|invoke>` 或 `<invoke name="...">...</invoke>` 内
|
||||
- 工具名必须放在 `invoke` 的 `name` 属性
|
||||
- 参数必须使用 `<|DSML|parameter name="...">...</|DSML|parameter>` 或 `<parameter name="...">...</parameter>`
|
||||
- 同一个工具块内不要混用 DSML 标签和旧 XML 工具标签;混搭会被视为非法工具块
|
||||
|
||||
兼容修复:
|
||||
|
||||
- 如果模型漏掉 opening wrapper,但后面仍输出了一个或多个 invoke 并以 closing wrapper 收尾,Go 解析链路会在解析前补回缺失的 opening wrapper。
|
||||
- Go / Node 解析层不再枚举每一种 DSML typo。它会把工具标签名前的 `DSML`、管道符(半角 `|` 或全角 `|`)、空白、重复 leading `<` 视为可容忍的协议噪声,然后只匹配固定本地标签名 `tool_calls` / `invoke` / `parameter`。例如 `<DSML|tool_calls>`、`<<|DSML|tool_calls>`、`<|DSML tool_calls>`、`<DSMLtool_calls>`、`<<DSML|DSML|tool_calls>` 都会归一化;相似但非固定标签名(如 `tool_calls_extra`)仍按普通文本处理。
|
||||
- 如果模型在固定工具标签名后多输出一个尾部管道符,例如 `<|DSML|tool_calls|` / `<|DSML|invoke|` / `<|DSML|parameter|`,兼容层会把这个尾部 `|` 当作异常标签终止符并补齐缺失的 `>`;如果后面已经有 `>`,也会消费这个多余 `|` 后再归一化。
|
||||
- 这是一个针对常见模型失误的窄修复,不改变推荐输出格式;prompt 仍要求模型直接输出完整 DSML 外壳。
|
||||
- 裸 `<invoke ...>` / `<parameter ...>` 不会被当成“已支持的工具语法”;只有 `tool_calls` wrapper 或可修复的缺失 opening wrapper 才会进入工具调用路径。
|
||||
|
||||
## 2) 非兼容内容
|
||||
|
||||
任何不满足上述 DSML / canonical XML 形态的内容,都会保留为普通文本,不会执行。一个例外是上一节提到的“缺失 opening wrapper、但 closing wrapper 仍存在”的窄修复场景。
|
||||
|
||||
当前 parser 不把 allow-list 当作硬安全边界:即使传入了已声明工具名列表,XML 里出现未声明工具名时也会尽量解析并交给上层协议输出;真正的执行侧仍必须自行校验工具名和参数。
|
||||
|
||||
## 3) 流式与防泄漏行为
|
||||
|
||||
在流式链路中(Go / Node 一致):
|
||||
|
||||
- DSML `<|DSML|tool_calls>` wrapper、短横线形式(如 `<dsml-tool-calls>` / `<dsml-invoke>` / `<dsml-parameter>`)、基于固定本地标签名的 DSML 噪声容错形态、尾部管道符形态(如 `<|DSML|tool_calls|`)和 canonical `<tool_calls>` wrapper 都会进入结构化捕获
|
||||
- 如果流里直接从 invoke 开始,但后面补上了 closing wrapper,Go 流式筛分也会按缺失 opening wrapper 的修复路径尝试恢复
|
||||
- 已识别成功的工具调用不会再次回流到普通文本
|
||||
- 不符合新格式的块不会执行,并继续按原样文本透传
|
||||
- fenced code block(反引号 `` ``` `` 和波浪线 `~~~`)中的 XML 示例始终按普通文本处理
|
||||
- 支持嵌套围栏(如 4 反引号嵌套 3 反引号)和 CDATA 内围栏保护
|
||||
- 对 `command` / `content` 等长文本参数,CDATA 内部如果包含 Markdown fenced DSML / XML 示例,即使示例里出现 `]]></parameter>` / `</tool_calls>` 这类看起来像外层结束标签的片段,也会继续按参数原文保留,直到真正位于围栏外的外层结束标签
|
||||
- 如果模型把 `<![CDATA[` 打开后却没有闭合,流式扫描阶段仍会保守地继续缓冲,不会误把 CDATA 里的示例 XML 当成真实工具调用;在最终 parse / flush 恢复阶段,会对这类 loose CDATA 做窄修复,尽量保住外层已完整包裹的真实工具调用
|
||||
- 当文本中 mention 了某种标签名(如 `<dsml|tool_calls>` 或 Markdown inline code 里的 `<|DSML|tool_calls>`)而后面紧跟真正工具调用时,sieve 会跳过不可解析的 mention 候选并继续匹配后续真实工具块,不会因 mention 导致工具调用丢失,也不会截断 mention 后的正文
|
||||
- Go 侧 SSE 读取不再使用 `bufio.Scanner` 的固定 token 上限;单个 `data:` 行中包含很长的写文件参数时,非流式收集、流式解析与 auto-continue 透传都应保留完整行,再交给 tool parser 处理
|
||||
|
||||
另外,`<parameter>` 的值如果本身是合法 JSON 字面量,也会按结构化值解析,而不是一律保留为字符串。例如 `123`、`true`、`null`、`[1,2]`、`{"a":1}` 都会还原成对应的 number / boolean / null / array / object。
|
||||
结构化 XML 参数也会还原为 JSON 结构:如果参数体只包含一个或多个 `<item>...</item>` 子节点,会输出数组;嵌套对象里的 item-only 字段也同样按数组处理。例如 `<parameter name="questions"><item><question>...</question></item></parameter>` 会输出 `{"questions":[{"question":"..."}]}`,而不是 `{"questions":{"item":...}}`。
|
||||
如果模型误把完整结构化 XML fragment 放进 CDATA,Go / Node 会先保护明显的原文字段(如 `content` / `command` / `prompt` / `old_string` / `new_string`),其余参数会尝试把 CDATA 内的完整 XML fragment 还原成 object / array;常见的 `<br>` 分隔符会按换行归一化后再解析。但如果 CDATA 只是单个平面的 XML/HTML 标签,例如 `<b>urgent</b>` 这种行内标记,兼容层会把它保留为原始字符串,而不会强行升成 object / array;只有明显表示结构的 CDATA 片段,例如多兄弟节点、嵌套子节点或 `item` 列表,才会触发结构化恢复。
|
||||
|
||||
## 4) 输出结构
|
||||
|
||||
`ParseToolCallsDetailed` / `parseToolCallsDetailed` 返回:
|
||||
|
||||
- `calls`:解析出的工具调用列表(`name` + `input`)。
|
||||
- `sawToolCallSyntax`:检测到工具调用语法特征时为 `true`。
|
||||
- `rejectedByPolicy`:当前实现固定为 `false`(预留字段)。
|
||||
- `rejectedToolNames`:当前实现固定为空数组(预留字段)。
|
||||
- `calls`:解析出的工具调用列表(`name` + `input`)
|
||||
- `sawToolCallSyntax`:检测到 DSML / canonical wrapper,或命中“缺失 opening wrapper 但可修复”的形态时会为 `true`;裸 `invoke` 不计入该标记
|
||||
- `rejectedByPolicy`:当前固定为 `false`
|
||||
- `rejectedToolNames`:当前固定为空数组
|
||||
|
||||
> 当前 `filterToolCallsDetailed` 仅做结构清洗,不做 allow-list 工具名硬拒绝。
|
||||
## 5) 落地建议
|
||||
|
||||
## 2) 解析范围(重点)
|
||||
1. Prompt 里只示范 DSML 外壳语法。
|
||||
2. 上游客户端应直接输出完整 DSML 外壳;DS2API 兼容旧式 canonical XML,并只对“closing tag 在、opening tag 漏掉”的常见失误做窄修复,不会泛化接受其他旧格式。
|
||||
3. 不要依赖 parser 做安全控制;执行器侧仍应做工具名和参数校验。
|
||||
|
||||
当前版本的可执行解析以 **XML/Markup 家族**为主:
|
||||
|
||||
- `<tool_call>...</tool_call>`
|
||||
- `<function_call>...</function_call>`
|
||||
- `<invoke ...>...</invoke>`(含自闭合)
|
||||
- `<tool_use>...</tool_use>`
|
||||
- antml 变体(如 `antml:function_call` / `antml:argument`)
|
||||
|
||||
并支持在这些标记块内部解析:
|
||||
|
||||
- JSON 参数字符串
|
||||
- 标签参数(`<parameter name="...">...`)
|
||||
- key/value 风格子标签
|
||||
|
||||
## 3) 不应再假设的行为
|
||||
|
||||
以下说法在当前实现中已不成立:
|
||||
|
||||
1. “纯 JSON `tool_calls` 片段会被直接当作可执行工具调用解析”。
|
||||
2. “存在 `toolcall.mode` / `toolcall.early_emit_confidence` 等可配置开关可以改变解析策略”。
|
||||
|
||||
当前策略在代码中固定为:
|
||||
|
||||
- 特征匹配开启(feature-match on)
|
||||
- 高置信度早发开启(early emit on)
|
||||
- policy 拒绝字段保留但未启用
|
||||
|
||||
## 4) 流式与防泄漏语义
|
||||
|
||||
在流式链路中(OpenAI / Claude / Gemini 统一内核):
|
||||
|
||||
- 工具调用片段会被优先提取为结构化增量输出;
|
||||
- 已识别的工具调用原始片段不会作为普通文本再次回流;
|
||||
- fenced code block 中的示例内容按文本处理,不作为可执行工具调用。
|
||||
|
||||
## 5) 落地建议(按当前实现)
|
||||
|
||||
1. Prompt 里优先约束模型输出 XML/Markup 工具块。
|
||||
2. 执行器侧继续做工具名白名单与参数 schema 校验(不要依赖 parser 代替安全策略)。
|
||||
3. 需要兼容历史“纯 JSON tool_calls”模型输出时,请在上游模板层把输出规范化为 XML/Markup 风格再进入 DS2API。
|
||||
|
||||
## 6) 回归验证建议
|
||||
## 6) 回归验证
|
||||
|
||||
可直接运行:
|
||||
|
||||
```bash
|
||||
go test -v -run 'TestParseToolCalls|TestRepair' ./internal/toolcall/
|
||||
go test -v -run 'TestParseToolCalls|TestProcessToolSieve' ./internal/toolcall ./internal/toolstream ./internal/httpapi/openai/...
|
||||
node --test tests/node/stream-tool-sieve.test.js
|
||||
```
|
||||
|
||||
重点覆盖:
|
||||
|
||||
- `<tool_call>` / `<function_call>` / `<invoke>` / `tool_use` / antml 变体
|
||||
- 参数 JSON 修复与解析
|
||||
- 流式增量下的工具调用提取与文本防泄漏
|
||||
- DSML `<|DSML|tool_calls>` wrapper 正常解析
|
||||
- legacy canonical `<tool_calls>` wrapper 正常解析
|
||||
- 固定本地标签名的 DSML 噪声容错形态(如 `<DSML|tool_calls>`、`<<|DSML|tool_calls>`、`<|DSML tool_calls>`、`<DSMLtool_calls>`、`<<DSML|DSML|tool_calls>`)正常解析
|
||||
- 混搭标签(DSML wrapper + canonical inner)归一化后正常解析
|
||||
- 波浪线围栏 `~~~` 内的示例不执行
|
||||
- 嵌套围栏(4 反引号嵌套 3 反引号)内的示例不执行
|
||||
- 文本 mention 标签名后紧跟真正工具调用的场景(含同一 wrapper 变体)
|
||||
- 非兼容内容按普通文本透传
|
||||
- 代码块示例不执行
|
||||
|
||||
3
go.mod
3
go.mod
@@ -6,10 +6,13 @@ require (
|
||||
github.com/andybalholm/brotli v1.2.1
|
||||
github.com/go-chi/chi/v5 v5.2.5
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hupe1980/go-tiktoken v0.0.10
|
||||
github.com/refraction-networking/utls v1.8.2
|
||||
github.com/router-for-me/CLIProxyAPI/v6 v6.9.14
|
||||
)
|
||||
|
||||
require github.com/dlclark/regexp2 v1.11.5 // indirect
|
||||
|
||||
require (
|
||||
github.com/klauspost/compress v1.18.5 // indirect
|
||||
github.com/sirupsen/logrus v1.9.4 // indirect
|
||||
|
||||
6
go.sum
6
go.sum
@@ -2,10 +2,14 @@ github.com/andybalholm/brotli v1.2.1 h1:R+f5xP285VArJDRgowrfb9DqL18yVK0gKAW/F+eT
|
||||
github.com/andybalholm/brotli v1.2.1/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
|
||||
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
|
||||
github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hupe1980/go-tiktoken v0.0.10 h1:m6phOJaGyctqWdGIgwn9X8AfJvaG74tnQoDL+ntOUEQ=
|
||||
github.com/hupe1980/go-tiktoken v0.0.10/go.mod h1:NME6d8hrE+Jo+kLUZHhXShYV8e40hYkm4BbSLQKtvAo=
|
||||
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@@ -37,6 +41,8 @@ golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
|
||||
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
package claude
|
||||
|
||||
import "testing"
|
||||
|
||||
type mockClaudeConfig struct {
|
||||
m map[string]string
|
||||
}
|
||||
|
||||
func (m mockClaudeConfig) ClaudeMapping() map[string]string { return m.m }
|
||||
func (mockClaudeConfig) CompatStripReferenceMarkers() bool { return true }
|
||||
|
||||
func TestNormalizeClaudeRequestUsesConfigInterfaceMapping(t *testing.T) {
|
||||
req := map[string]any{
|
||||
"model": "claude-opus-4-6",
|
||||
"messages": []any{
|
||||
map[string]any{"role": "user", "content": "hello"},
|
||||
},
|
||||
}
|
||||
out, err := normalizeClaudeRequest(mockClaudeConfig{
|
||||
m: map[string]string{
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner-search",
|
||||
},
|
||||
}, req)
|
||||
if err != nil {
|
||||
t.Fatalf("normalizeClaudeRequest error: %v", err)
|
||||
}
|
||||
if out.Standard.ResolvedModel != "deepseek-reasoner-search" {
|
||||
t.Fatalf("resolved model mismatch: got=%q", out.Standard.ResolvedModel)
|
||||
}
|
||||
if !out.Standard.Thinking || !out.Standard.Search {
|
||||
t.Fatalf("unexpected flags: thinking=%v search=%v", out.Standard.Thinking, out.Standard.Search)
|
||||
}
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
package claude
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type claudeProxyStoreStub struct {
|
||||
mapping map[string]string
|
||||
}
|
||||
|
||||
func (s claudeProxyStoreStub) ClaudeMapping() map[string]string {
|
||||
return s.mapping
|
||||
}
|
||||
|
||||
func (claudeProxyStoreStub) CompatStripReferenceMarkers() bool { return true }
|
||||
|
||||
type openAIProxyStub struct {
|
||||
status int
|
||||
body string
|
||||
}
|
||||
|
||||
func (s openAIProxyStub) ChatCompletions(w http.ResponseWriter, _ *http.Request) {
|
||||
if s.status == 0 {
|
||||
s.status = http.StatusOK
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(s.status)
|
||||
_, _ = w.Write([]byte(s.body))
|
||||
}
|
||||
|
||||
type openAIProxyCaptureStub struct {
|
||||
seenModel string
|
||||
seenReq map[string]any
|
||||
}
|
||||
|
||||
func (s *openAIProxyCaptureStub) ChatCompletions(w http.ResponseWriter, r *http.Request) {
|
||||
var req map[string]any
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
s.seenReq = req
|
||||
if m, ok := req["model"].(string); ok {
|
||||
s.seenModel = m
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte(`{"id":"ok","choices":[{"message":{"role":"assistant","content":"ok"}}]}`))
|
||||
}
|
||||
|
||||
func TestClaudeProxyViaOpenAIVercelPreparePassthrough(t *testing.T) {
|
||||
h := &Handler{OpenAI: openAIProxyStub{status: 200, body: `{"lease_id":"lease_123","payload":{"a":1}}`}}
|
||||
req := httptest.NewRequest(http.MethodPost, "/anthropic/v1/messages?__stream_prepare=1", strings.NewReader(`{"model":"claude-sonnet-4-5","messages":[{"role":"user","content":"hi"}],"stream":true}`))
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
h.Messages(rec, req)
|
||||
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Fatalf("unexpected status: %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
var out map[string]any
|
||||
if err := json.Unmarshal(rec.Body.Bytes(), &out); err != nil {
|
||||
t.Fatalf("expected json response, got err=%v body=%s", err, rec.Body.String())
|
||||
}
|
||||
if _, ok := out["lease_id"]; !ok {
|
||||
t.Fatalf("expected lease_id in prepare passthrough, got=%v", out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClaudeProxyViaOpenAIPreservesClaudeMapping(t *testing.T) {
|
||||
openAI := &openAIProxyCaptureStub{}
|
||||
h := &Handler{
|
||||
Store: claudeProxyStoreStub{mapping: map[string]string{"fast": "deepseek-chat", "slow": "deepseek-reasoner"}},
|
||||
OpenAI: openAI,
|
||||
}
|
||||
req := httptest.NewRequest(http.MethodPost, "/anthropic/v1/messages", strings.NewReader(`{"model":"claude-3-opus","messages":[{"role":"user","content":"hi"}],"stream":false}`))
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
h.Messages(rec, req)
|
||||
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Fatalf("unexpected status: %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
if got := strings.TrimSpace(openAI.seenModel); got != "deepseek-reasoner" {
|
||||
t.Fatalf("expected mapped proxy model deepseek-reasoner, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClaudeProxyTranslatesInlineImageToOpenAIDataURL(t *testing.T) {
|
||||
openAI := &openAIProxyCaptureStub{}
|
||||
h := &Handler{OpenAI: openAI}
|
||||
req := httptest.NewRequest(http.MethodPost, "/anthropic/v1/messages", strings.NewReader(`{"model":"claude-sonnet-4-5","messages":[{"role":"user","content":[{"type":"text","text":"hello"},{"type":"image","source":{"type":"base64","media_type":"image/png","data":"QUJDRA=="}}]}],"stream":false}`))
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
h.Messages(rec, req)
|
||||
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Fatalf("unexpected status: %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
messages, _ := openAI.seenReq["messages"].([]any)
|
||||
if len(messages) != 1 {
|
||||
t.Fatalf("expected one translated message, got %#v", openAI.seenReq)
|
||||
}
|
||||
msg, _ := messages[0].(map[string]any)
|
||||
content, _ := msg["content"].([]any)
|
||||
if len(content) != 2 {
|
||||
t.Fatalf("expected translated content blocks, got %#v", msg)
|
||||
}
|
||||
imageBlock, _ := content[1].(map[string]any)
|
||||
if strings.TrimSpace(asString(imageBlock["type"])) != "image_url" {
|
||||
t.Fatalf("expected image_url block, got %#v", imageBlock)
|
||||
}
|
||||
imageURL, _ := imageBlock["image_url"].(map[string]any)
|
||||
if !strings.HasPrefix(strings.TrimSpace(asString(imageURL["url"])), "data:image/png;base64,") {
|
||||
t.Fatalf("expected translated data url, got %#v", imageBlock)
|
||||
}
|
||||
}
|
||||
@@ -1,165 +0,0 @@
|
||||
package claude
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"ds2api/internal/sse"
|
||||
streamengine "ds2api/internal/stream"
|
||||
)
|
||||
|
||||
type claudeStreamRuntime struct {
|
||||
w http.ResponseWriter
|
||||
rc *http.ResponseController
|
||||
canFlush bool
|
||||
|
||||
model string
|
||||
toolNames []string
|
||||
messages []any
|
||||
|
||||
thinkingEnabled bool
|
||||
searchEnabled bool
|
||||
bufferToolContent bool
|
||||
stripReferenceMarkers bool
|
||||
|
||||
messageID string
|
||||
thinking strings.Builder
|
||||
text strings.Builder
|
||||
|
||||
nextBlockIndex int
|
||||
thinkingBlockOpen bool
|
||||
thinkingBlockIndex int
|
||||
textBlockOpen bool
|
||||
textBlockIndex int
|
||||
ended bool
|
||||
upstreamErr string
|
||||
}
|
||||
|
||||
func newClaudeStreamRuntime(
|
||||
w http.ResponseWriter,
|
||||
rc *http.ResponseController,
|
||||
canFlush bool,
|
||||
model string,
|
||||
messages []any,
|
||||
thinkingEnabled bool,
|
||||
searchEnabled bool,
|
||||
stripReferenceMarkers bool,
|
||||
toolNames []string,
|
||||
) *claudeStreamRuntime {
|
||||
return &claudeStreamRuntime{
|
||||
w: w,
|
||||
rc: rc,
|
||||
canFlush: canFlush,
|
||||
model: model,
|
||||
messages: messages,
|
||||
thinkingEnabled: thinkingEnabled,
|
||||
searchEnabled: searchEnabled,
|
||||
bufferToolContent: len(toolNames) > 0,
|
||||
stripReferenceMarkers: stripReferenceMarkers,
|
||||
toolNames: toolNames,
|
||||
messageID: fmt.Sprintf("msg_%d", time.Now().UnixNano()),
|
||||
thinkingBlockIndex: -1,
|
||||
textBlockIndex: -1,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *claudeStreamRuntime) onParsed(parsed sse.LineResult) streamengine.ParsedDecision {
|
||||
if !parsed.Parsed {
|
||||
return streamengine.ParsedDecision{}
|
||||
}
|
||||
if parsed.ErrorMessage != "" {
|
||||
s.upstreamErr = parsed.ErrorMessage
|
||||
return streamengine.ParsedDecision{Stop: true, StopReason: streamengine.StopReason("upstream_error")}
|
||||
}
|
||||
if parsed.Stop {
|
||||
return streamengine.ParsedDecision{Stop: true}
|
||||
}
|
||||
|
||||
contentSeen := false
|
||||
for _, p := range parsed.Parts {
|
||||
cleanedText := cleanVisibleOutput(p.Text, s.stripReferenceMarkers)
|
||||
if cleanedText == "" {
|
||||
continue
|
||||
}
|
||||
if p.Type != "thinking" && s.searchEnabled && sse.IsCitation(cleanedText) {
|
||||
continue
|
||||
}
|
||||
contentSeen = true
|
||||
|
||||
if p.Type == "thinking" {
|
||||
if !s.thinkingEnabled {
|
||||
continue
|
||||
}
|
||||
trimmed := sse.TrimContinuationOverlap(s.thinking.String(), cleanedText)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
s.thinking.WriteString(trimmed)
|
||||
s.closeTextBlock()
|
||||
if !s.thinkingBlockOpen {
|
||||
s.thinkingBlockIndex = s.nextBlockIndex
|
||||
s.nextBlockIndex++
|
||||
s.send("content_block_start", map[string]any{
|
||||
"type": "content_block_start",
|
||||
"index": s.thinkingBlockIndex,
|
||||
"content_block": map[string]any{
|
||||
"type": "thinking",
|
||||
"thinking": "",
|
||||
},
|
||||
})
|
||||
s.thinkingBlockOpen = true
|
||||
}
|
||||
s.send("content_block_delta", map[string]any{
|
||||
"type": "content_block_delta",
|
||||
"index": s.thinkingBlockIndex,
|
||||
"delta": map[string]any{
|
||||
"type": "thinking_delta",
|
||||
"thinking": trimmed,
|
||||
},
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
trimmed := sse.TrimContinuationOverlap(s.text.String(), cleanedText)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
s.text.WriteString(trimmed)
|
||||
if s.bufferToolContent {
|
||||
if hasUnclosedCodeFence(s.text.String()) {
|
||||
continue
|
||||
}
|
||||
continue
|
||||
}
|
||||
s.closeThinkingBlock()
|
||||
if !s.textBlockOpen {
|
||||
s.textBlockIndex = s.nextBlockIndex
|
||||
s.nextBlockIndex++
|
||||
s.send("content_block_start", map[string]any{
|
||||
"type": "content_block_start",
|
||||
"index": s.textBlockIndex,
|
||||
"content_block": map[string]any{
|
||||
"type": "text",
|
||||
"text": "",
|
||||
},
|
||||
})
|
||||
s.textBlockOpen = true
|
||||
}
|
||||
s.send("content_block_delta", map[string]any{
|
||||
"type": "content_block_delta",
|
||||
"index": s.textBlockIndex,
|
||||
"delta": map[string]any{
|
||||
"type": "text_delta",
|
||||
"text": trimmed,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return streamengine.ParsedDecision{ContentSeen: contentSeen}
|
||||
}
|
||||
|
||||
func hasUnclosedCodeFence(text string) bool {
|
||||
return strings.Count(text, "```")%2 == 1
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
package claude
|
||||
|
||||
import (
|
||||
"ds2api/internal/toolcall"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
streamengine "ds2api/internal/stream"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
func (s *claudeStreamRuntime) closeThinkingBlock() {
|
||||
if !s.thinkingBlockOpen {
|
||||
return
|
||||
}
|
||||
s.send("content_block_stop", map[string]any{
|
||||
"type": "content_block_stop",
|
||||
"index": s.thinkingBlockIndex,
|
||||
})
|
||||
s.thinkingBlockOpen = false
|
||||
s.thinkingBlockIndex = -1
|
||||
}
|
||||
|
||||
func (s *claudeStreamRuntime) closeTextBlock() {
|
||||
if !s.textBlockOpen {
|
||||
return
|
||||
}
|
||||
s.send("content_block_stop", map[string]any{
|
||||
"type": "content_block_stop",
|
||||
"index": s.textBlockIndex,
|
||||
})
|
||||
s.textBlockOpen = false
|
||||
s.textBlockIndex = -1
|
||||
}
|
||||
|
||||
func (s *claudeStreamRuntime) finalize(stopReason string) {
|
||||
if s.ended {
|
||||
return
|
||||
}
|
||||
s.ended = true
|
||||
|
||||
s.closeThinkingBlock()
|
||||
s.closeTextBlock()
|
||||
|
||||
finalThinking := s.thinking.String()
|
||||
finalText := cleanVisibleOutput(s.text.String(), s.stripReferenceMarkers)
|
||||
|
||||
if s.bufferToolContent {
|
||||
detected := toolcall.ParseStandaloneToolCalls(finalText, s.toolNames)
|
||||
if len(detected) == 0 && finalText == "" && finalThinking != "" {
|
||||
detected = toolcall.ParseStandaloneToolCalls(finalThinking, s.toolNames)
|
||||
}
|
||||
if len(detected) > 0 {
|
||||
stopReason = "tool_use"
|
||||
for i, tc := range detected {
|
||||
idx := s.nextBlockIndex + i
|
||||
s.send("content_block_start", map[string]any{
|
||||
"type": "content_block_start",
|
||||
"index": idx,
|
||||
"content_block": map[string]any{
|
||||
"type": "tool_use",
|
||||
"id": fmt.Sprintf("toolu_%d_%d", time.Now().Unix(), idx),
|
||||
"name": tc.Name,
|
||||
"input": map[string]any{},
|
||||
},
|
||||
})
|
||||
|
||||
inputBytes, _ := json.Marshal(tc.Input)
|
||||
s.send("content_block_delta", map[string]any{
|
||||
"type": "content_block_delta",
|
||||
"index": idx,
|
||||
"delta": map[string]any{
|
||||
"type": "input_json_delta",
|
||||
"partial_json": string(inputBytes),
|
||||
},
|
||||
})
|
||||
|
||||
s.send("content_block_stop", map[string]any{
|
||||
"type": "content_block_stop",
|
||||
"index": idx,
|
||||
})
|
||||
}
|
||||
s.nextBlockIndex += len(detected)
|
||||
} else if finalText != "" {
|
||||
idx := s.nextBlockIndex
|
||||
s.nextBlockIndex++
|
||||
s.send("content_block_start", map[string]any{
|
||||
"type": "content_block_start",
|
||||
"index": idx,
|
||||
"content_block": map[string]any{
|
||||
"type": "text",
|
||||
"text": "",
|
||||
},
|
||||
})
|
||||
s.send("content_block_delta", map[string]any{
|
||||
"type": "content_block_delta",
|
||||
"index": idx,
|
||||
"delta": map[string]any{
|
||||
"type": "text_delta",
|
||||
"text": finalText,
|
||||
},
|
||||
})
|
||||
s.send("content_block_stop", map[string]any{
|
||||
"type": "content_block_stop",
|
||||
"index": idx,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
outputTokens := util.EstimateTokens(finalThinking) + util.EstimateTokens(finalText)
|
||||
s.send("message_delta", map[string]any{
|
||||
"type": "message_delta",
|
||||
"delta": map[string]any{
|
||||
"stop_reason": stopReason,
|
||||
"stop_sequence": nil,
|
||||
},
|
||||
"usage": map[string]any{
|
||||
"output_tokens": outputTokens,
|
||||
},
|
||||
})
|
||||
s.send("message_stop", map[string]any{"type": "message_stop"})
|
||||
}
|
||||
|
||||
func (s *claudeStreamRuntime) onFinalize(reason streamengine.StopReason, scannerErr error) {
|
||||
if string(reason) == "upstream_error" {
|
||||
s.sendError(s.upstreamErr)
|
||||
return
|
||||
}
|
||||
if scannerErr != nil {
|
||||
s.sendError(scannerErr.Error())
|
||||
return
|
||||
}
|
||||
s.finalize("end_turn")
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package gemini
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"ds2api/internal/adapter/openai"
|
||||
"ds2api/internal/config"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
//nolint:unused // kept for native Gemini adapter route compatibility.
|
||||
func normalizeGeminiRequest(store ConfigReader, routeModel string, req map[string]any, stream bool) (util.StandardRequest, error) {
|
||||
requestedModel := strings.TrimSpace(routeModel)
|
||||
if requestedModel == "" {
|
||||
return util.StandardRequest{}, fmt.Errorf("model is required in request path")
|
||||
}
|
||||
|
||||
resolvedModel, ok := config.ResolveModel(store, requestedModel)
|
||||
if !ok {
|
||||
return util.StandardRequest{}, fmt.Errorf("model %q is not available", requestedModel)
|
||||
}
|
||||
thinkingEnabled, searchEnabled, _ := config.GetModelConfig(resolvedModel)
|
||||
|
||||
messagesRaw := geminiMessagesFromRequest(req)
|
||||
if len(messagesRaw) == 0 {
|
||||
return util.StandardRequest{}, fmt.Errorf("request must include non-empty contents")
|
||||
}
|
||||
|
||||
toolsRaw := convertGeminiTools(req["tools"])
|
||||
finalPrompt, toolNames := openai.BuildPromptForAdapter(messagesRaw, toolsRaw, "", thinkingEnabled)
|
||||
passThrough := collectGeminiPassThrough(req)
|
||||
|
||||
return util.StandardRequest{
|
||||
Surface: "google_gemini",
|
||||
RequestedModel: requestedModel,
|
||||
ResolvedModel: resolvedModel,
|
||||
ResponseModel: requestedModel,
|
||||
Messages: messagesRaw,
|
||||
FinalPrompt: finalPrompt,
|
||||
ToolNames: toolNames,
|
||||
Stream: stream,
|
||||
Thinking: thinkingEnabled,
|
||||
Search: searchEnabled,
|
||||
PassThrough: passThrough,
|
||||
}, nil
|
||||
}
|
||||
@@ -1,211 +0,0 @@
|
||||
package gemini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"ds2api/internal/toolcall"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"ds2api/internal/sse"
|
||||
"ds2api/internal/translatorcliproxy"
|
||||
"ds2api/internal/util"
|
||||
|
||||
sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
)
|
||||
|
||||
func (h *Handler) handleGenerateContent(w http.ResponseWriter, r *http.Request, stream bool) {
|
||||
if h.OpenAI == nil {
|
||||
writeGeminiError(w, http.StatusInternalServerError, "OpenAI proxy backend unavailable.")
|
||||
return
|
||||
}
|
||||
if h.proxyViaOpenAI(w, r, stream) {
|
||||
return
|
||||
}
|
||||
writeGeminiError(w, http.StatusBadGateway, "Failed to proxy Gemini request.")
|
||||
}
|
||||
|
||||
func (h *Handler) proxyViaOpenAI(w http.ResponseWriter, r *http.Request, stream bool) bool {
|
||||
raw, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
writeGeminiError(w, http.StatusBadRequest, "invalid body")
|
||||
return true
|
||||
}
|
||||
routeModel := strings.TrimSpace(chi.URLParam(r, "model"))
|
||||
translatedReq := translatorcliproxy.ToOpenAI(sdktranslator.FormatGemini, routeModel, raw, stream)
|
||||
if !strings.Contains(string(translatedReq), `"stream"`) {
|
||||
var reqMap map[string]any
|
||||
if json.Unmarshal(translatedReq, &reqMap) == nil {
|
||||
reqMap["stream"] = stream
|
||||
if b, e := json.Marshal(reqMap); e == nil {
|
||||
translatedReq = b
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
isVercelPrepare := strings.TrimSpace(r.URL.Query().Get("__stream_prepare")) == "1"
|
||||
isVercelRelease := strings.TrimSpace(r.URL.Query().Get("__stream_release")) == "1"
|
||||
|
||||
if isVercelRelease {
|
||||
proxyReq := r.Clone(r.Context())
|
||||
proxyReq.URL.Path = "/v1/chat/completions"
|
||||
proxyReq.Body = io.NopCloser(bytes.NewReader(raw))
|
||||
proxyReq.ContentLength = int64(len(raw))
|
||||
rec := httptest.NewRecorder()
|
||||
h.OpenAI.ChatCompletions(rec, proxyReq)
|
||||
res := rec.Result()
|
||||
defer func() { _ = res.Body.Close() }()
|
||||
body, _ := io.ReadAll(res.Body)
|
||||
for k, vv := range res.Header {
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
w.WriteHeader(res.StatusCode)
|
||||
_, _ = w.Write(body)
|
||||
return true
|
||||
}
|
||||
|
||||
proxyReq := r.Clone(r.Context())
|
||||
proxyReq.URL.Path = "/v1/chat/completions"
|
||||
proxyReq.Body = io.NopCloser(bytes.NewReader(translatedReq))
|
||||
proxyReq.ContentLength = int64(len(translatedReq))
|
||||
|
||||
if stream && !isVercelPrepare {
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache, no-transform")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
w.Header().Set("X-Accel-Buffering", "no")
|
||||
streamWriter := translatorcliproxy.NewOpenAIStreamTranslatorWriter(w, sdktranslator.FormatGemini, routeModel, raw, translatedReq)
|
||||
h.OpenAI.ChatCompletions(streamWriter, proxyReq)
|
||||
return true
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
h.OpenAI.ChatCompletions(rec, proxyReq)
|
||||
res := rec.Result()
|
||||
defer func() { _ = res.Body.Close() }()
|
||||
body, _ := io.ReadAll(res.Body)
|
||||
if res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||
for k, vv := range res.Header {
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
writeGeminiErrorFromOpenAI(w, res.StatusCode, body)
|
||||
return true
|
||||
}
|
||||
if isVercelPrepare {
|
||||
for k, vv := range res.Header {
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
w.WriteHeader(res.StatusCode)
|
||||
_, _ = w.Write(body)
|
||||
return true
|
||||
}
|
||||
converted := translatorcliproxy.FromOpenAINonStream(sdktranslator.FormatGemini, routeModel, raw, translatedReq, body)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write(converted)
|
||||
return true
|
||||
}
|
||||
|
||||
func writeGeminiErrorFromOpenAI(w http.ResponseWriter, status int, raw []byte) {
|
||||
message := strings.TrimSpace(string(raw))
|
||||
var parsed map[string]any
|
||||
if err := json.Unmarshal(raw, &parsed); err == nil {
|
||||
if errObj, ok := parsed["error"].(map[string]any); ok {
|
||||
if msg, ok := errObj["message"].(string); ok && strings.TrimSpace(msg) != "" {
|
||||
message = strings.TrimSpace(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
if message == "" {
|
||||
message = http.StatusText(status)
|
||||
}
|
||||
writeGeminiError(w, status, message)
|
||||
}
|
||||
|
||||
//nolint:unused // retained for native Gemini non-stream handling path.
|
||||
func (h *Handler) handleNonStreamGenerateContent(w http.ResponseWriter, resp *http.Response, model, finalPrompt string, thinkingEnabled bool, toolNames []string) {
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
writeGeminiError(w, resp.StatusCode, strings.TrimSpace(string(body)))
|
||||
return
|
||||
}
|
||||
|
||||
result := sse.CollectStream(resp, thinkingEnabled, true)
|
||||
stripReferenceMarkers := h.compatStripReferenceMarkers()
|
||||
writeJSON(w, http.StatusOK, buildGeminiGenerateContentResponse(
|
||||
model,
|
||||
finalPrompt,
|
||||
cleanVisibleOutput(result.Thinking, stripReferenceMarkers),
|
||||
cleanVisibleOutput(result.Text, stripReferenceMarkers),
|
||||
toolNames,
|
||||
))
|
||||
}
|
||||
|
||||
//nolint:unused // retained for native Gemini non-stream handling path.
|
||||
func buildGeminiGenerateContentResponse(model, finalPrompt, finalThinking, finalText string, toolNames []string) map[string]any {
|
||||
parts := buildGeminiPartsFromFinal(finalText, finalThinking, toolNames)
|
||||
usage := buildGeminiUsage(finalPrompt, finalThinking, finalText)
|
||||
return map[string]any{
|
||||
"candidates": []map[string]any{
|
||||
{
|
||||
"index": 0,
|
||||
"content": map[string]any{
|
||||
"role": "model",
|
||||
"parts": parts,
|
||||
},
|
||||
"finishReason": "STOP",
|
||||
},
|
||||
},
|
||||
"modelVersion": model,
|
||||
"usageMetadata": usage,
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:unused // retained for native Gemini non-stream handling path.
|
||||
func buildGeminiUsage(finalPrompt, finalThinking, finalText string) map[string]any {
|
||||
promptTokens := util.EstimateTokens(finalPrompt)
|
||||
reasoningTokens := util.EstimateTokens(finalThinking)
|
||||
completionTokens := util.EstimateTokens(finalText)
|
||||
return map[string]any{
|
||||
"promptTokenCount": promptTokens,
|
||||
"candidatesTokenCount": reasoningTokens + completionTokens,
|
||||
"totalTokenCount": promptTokens + reasoningTokens + completionTokens,
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:unused // retained for native Gemini non-stream handling path.
|
||||
func buildGeminiPartsFromFinal(finalText, finalThinking string, toolNames []string) []map[string]any {
|
||||
detected := toolcall.ParseToolCalls(finalText, toolNames)
|
||||
if len(detected) == 0 && finalThinking != "" {
|
||||
detected = toolcall.ParseToolCalls(finalThinking, toolNames)
|
||||
}
|
||||
if len(detected) > 0 {
|
||||
parts := make([]map[string]any, 0, len(detected))
|
||||
for _, tc := range detected {
|
||||
parts = append(parts, map[string]any{
|
||||
"functionCall": map[string]any{
|
||||
"name": tc.Name,
|
||||
"args": tc.Input,
|
||||
},
|
||||
})
|
||||
}
|
||||
return parts
|
||||
}
|
||||
|
||||
text := finalText
|
||||
if text == "" {
|
||||
text = finalThinking
|
||||
}
|
||||
return []map[string]any{{"text": text}}
|
||||
}
|
||||
@@ -1,347 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"ds2api/internal/toolcall"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
openaifmt "ds2api/internal/format/openai"
|
||||
"ds2api/internal/sse"
|
||||
streamengine "ds2api/internal/stream"
|
||||
)
|
||||
|
||||
type chatStreamRuntime struct {
|
||||
w http.ResponseWriter
|
||||
rc *http.ResponseController
|
||||
canFlush bool
|
||||
|
||||
completionID string
|
||||
created int64
|
||||
model string
|
||||
finalPrompt string
|
||||
toolNames []string
|
||||
|
||||
thinkingEnabled bool
|
||||
searchEnabled bool
|
||||
stripReferenceMarkers bool
|
||||
|
||||
firstChunkSent bool
|
||||
bufferToolContent bool
|
||||
emitEarlyToolDeltas bool
|
||||
toolCallsEmitted bool
|
||||
toolCallsDoneEmitted bool
|
||||
|
||||
toolSieve toolStreamSieveState
|
||||
streamToolCallIDs map[int]string
|
||||
streamToolNames map[int]string
|
||||
thinking strings.Builder
|
||||
text strings.Builder
|
||||
|
||||
finalThinking string
|
||||
finalText string
|
||||
finalFinishReason string
|
||||
finalUsage map[string]any
|
||||
finalErrorStatus int
|
||||
finalErrorMessage string
|
||||
finalErrorCode string
|
||||
}
|
||||
|
||||
func newChatStreamRuntime(
|
||||
w http.ResponseWriter,
|
||||
rc *http.ResponseController,
|
||||
canFlush bool,
|
||||
completionID string,
|
||||
created int64,
|
||||
model string,
|
||||
finalPrompt string,
|
||||
thinkingEnabled bool,
|
||||
searchEnabled bool,
|
||||
stripReferenceMarkers bool,
|
||||
toolNames []string,
|
||||
bufferToolContent bool,
|
||||
emitEarlyToolDeltas bool,
|
||||
) *chatStreamRuntime {
|
||||
return &chatStreamRuntime{
|
||||
w: w,
|
||||
rc: rc,
|
||||
canFlush: canFlush,
|
||||
completionID: completionID,
|
||||
created: created,
|
||||
model: model,
|
||||
finalPrompt: finalPrompt,
|
||||
toolNames: toolNames,
|
||||
thinkingEnabled: thinkingEnabled,
|
||||
searchEnabled: searchEnabled,
|
||||
stripReferenceMarkers: stripReferenceMarkers,
|
||||
bufferToolContent: bufferToolContent,
|
||||
emitEarlyToolDeltas: emitEarlyToolDeltas,
|
||||
streamToolCallIDs: map[int]string{},
|
||||
streamToolNames: map[int]string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *chatStreamRuntime) sendKeepAlive() {
|
||||
if !s.canFlush {
|
||||
return
|
||||
}
|
||||
_, _ = s.w.Write([]byte(": keep-alive\n\n"))
|
||||
_ = s.rc.Flush()
|
||||
}
|
||||
|
||||
func (s *chatStreamRuntime) sendChunk(v any) {
|
||||
b, _ := json.Marshal(v)
|
||||
_, _ = s.w.Write([]byte("data: "))
|
||||
_, _ = s.w.Write(b)
|
||||
_, _ = s.w.Write([]byte("\n\n"))
|
||||
if s.canFlush {
|
||||
_ = s.rc.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *chatStreamRuntime) sendDone() {
|
||||
_, _ = s.w.Write([]byte("data: [DONE]\n\n"))
|
||||
if s.canFlush {
|
||||
_ = s.rc.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *chatStreamRuntime) sendFailedChunk(status int, message, code string) {
|
||||
s.finalErrorStatus = status
|
||||
s.finalErrorMessage = message
|
||||
s.finalErrorCode = code
|
||||
s.sendChunk(map[string]any{
|
||||
"status_code": status,
|
||||
"error": map[string]any{
|
||||
"message": message,
|
||||
"type": openAIErrorType(status),
|
||||
"code": code,
|
||||
"param": nil,
|
||||
},
|
||||
})
|
||||
s.sendDone()
|
||||
}
|
||||
|
||||
func (s *chatStreamRuntime) resetStreamToolCallState() {
|
||||
s.streamToolCallIDs = map[int]string{}
|
||||
s.streamToolNames = map[int]string{}
|
||||
}
|
||||
|
||||
func (s *chatStreamRuntime) finalize(finishReason string) {
|
||||
finalThinking := s.thinking.String()
|
||||
finalText := cleanVisibleOutput(s.text.String(), s.stripReferenceMarkers)
|
||||
s.finalThinking = finalThinking
|
||||
s.finalText = finalText
|
||||
detected := toolcall.ParseStandaloneToolCallsDetailed(finalText, s.toolNames)
|
||||
if len(detected.Calls) > 0 && !s.toolCallsDoneEmitted {
|
||||
finishReason = "tool_calls"
|
||||
delta := map[string]any{
|
||||
"tool_calls": formatFinalStreamToolCallsWithStableIDs(detected.Calls, s.streamToolCallIDs),
|
||||
}
|
||||
if !s.firstChunkSent {
|
||||
delta["role"] = "assistant"
|
||||
s.firstChunkSent = true
|
||||
}
|
||||
s.sendChunk(openaifmt.BuildChatStreamChunk(
|
||||
s.completionID,
|
||||
s.created,
|
||||
s.model,
|
||||
[]map[string]any{openaifmt.BuildChatStreamDeltaChoice(0, delta)},
|
||||
nil,
|
||||
))
|
||||
s.toolCallsEmitted = true
|
||||
s.toolCallsDoneEmitted = true
|
||||
} else if s.bufferToolContent {
|
||||
for _, evt := range flushToolSieve(&s.toolSieve, s.toolNames) {
|
||||
if len(evt.ToolCalls) > 0 {
|
||||
finishReason = "tool_calls"
|
||||
s.toolCallsEmitted = true
|
||||
s.toolCallsDoneEmitted = true
|
||||
tcDelta := map[string]any{
|
||||
"tool_calls": formatFinalStreamToolCallsWithStableIDs(evt.ToolCalls, s.streamToolCallIDs),
|
||||
}
|
||||
if !s.firstChunkSent {
|
||||
tcDelta["role"] = "assistant"
|
||||
s.firstChunkSent = true
|
||||
}
|
||||
s.sendChunk(openaifmt.BuildChatStreamChunk(
|
||||
s.completionID,
|
||||
s.created,
|
||||
s.model,
|
||||
[]map[string]any{openaifmt.BuildChatStreamDeltaChoice(0, tcDelta)},
|
||||
nil,
|
||||
))
|
||||
s.resetStreamToolCallState()
|
||||
}
|
||||
if evt.Content == "" {
|
||||
continue
|
||||
}
|
||||
cleaned := cleanVisibleOutput(evt.Content, s.stripReferenceMarkers)
|
||||
if cleaned == "" {
|
||||
continue
|
||||
}
|
||||
delta := map[string]any{
|
||||
"content": cleaned,
|
||||
}
|
||||
if !s.firstChunkSent {
|
||||
delta["role"] = "assistant"
|
||||
s.firstChunkSent = true
|
||||
}
|
||||
s.sendChunk(openaifmt.BuildChatStreamChunk(
|
||||
s.completionID,
|
||||
s.created,
|
||||
s.model,
|
||||
[]map[string]any{openaifmt.BuildChatStreamDeltaChoice(0, delta)},
|
||||
nil,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
if len(detected.Calls) > 0 || s.toolCallsEmitted {
|
||||
finishReason = "tool_calls"
|
||||
}
|
||||
if len(detected.Calls) == 0 && !s.toolCallsEmitted && strings.TrimSpace(finalText) == "" {
|
||||
status := http.StatusTooManyRequests
|
||||
message := "Upstream model returned empty output."
|
||||
code := "upstream_empty_output"
|
||||
if strings.TrimSpace(finalThinking) != "" {
|
||||
message = "Upstream model returned reasoning without visible output."
|
||||
}
|
||||
if finishReason == "content_filter" {
|
||||
status = http.StatusBadRequest
|
||||
message = "Upstream content filtered the response and returned no output."
|
||||
code = "content_filter"
|
||||
}
|
||||
s.sendFailedChunk(status, message, code)
|
||||
return
|
||||
}
|
||||
usage := openaifmt.BuildChatUsage(s.finalPrompt, finalThinking, finalText)
|
||||
s.finalFinishReason = finishReason
|
||||
s.finalUsage = usage
|
||||
s.sendChunk(openaifmt.BuildChatStreamChunk(
|
||||
s.completionID,
|
||||
s.created,
|
||||
s.model,
|
||||
[]map[string]any{openaifmt.BuildChatStreamFinishChoice(0, finishReason)},
|
||||
usage,
|
||||
))
|
||||
s.sendDone()
|
||||
}
|
||||
|
||||
func (s *chatStreamRuntime) onParsed(parsed sse.LineResult) streamengine.ParsedDecision {
|
||||
if !parsed.Parsed {
|
||||
return streamengine.ParsedDecision{}
|
||||
}
|
||||
if parsed.ContentFilter {
|
||||
if strings.TrimSpace(s.text.String()) == "" {
|
||||
return streamengine.ParsedDecision{Stop: true, StopReason: streamengine.StopReason("content_filter")}
|
||||
}
|
||||
return streamengine.ParsedDecision{Stop: true, StopReason: streamengine.StopReasonHandlerRequested}
|
||||
}
|
||||
if parsed.ErrorMessage != "" {
|
||||
return streamengine.ParsedDecision{Stop: true, StopReason: streamengine.StopReason("content_filter")}
|
||||
}
|
||||
if parsed.Stop {
|
||||
return streamengine.ParsedDecision{Stop: true, StopReason: streamengine.StopReasonHandlerRequested}
|
||||
}
|
||||
|
||||
newChoices := make([]map[string]any, 0, len(parsed.Parts))
|
||||
contentSeen := false
|
||||
for _, p := range parsed.Parts {
|
||||
cleanedText := cleanVisibleOutput(p.Text, s.stripReferenceMarkers)
|
||||
if s.searchEnabled && sse.IsCitation(cleanedText) {
|
||||
continue
|
||||
}
|
||||
if cleanedText == "" {
|
||||
continue
|
||||
}
|
||||
contentSeen = true
|
||||
delta := map[string]any{}
|
||||
if !s.firstChunkSent {
|
||||
delta["role"] = "assistant"
|
||||
s.firstChunkSent = true
|
||||
}
|
||||
if p.Type == "thinking" {
|
||||
if s.thinkingEnabled {
|
||||
trimmed := sse.TrimContinuationOverlap(s.thinking.String(), cleanedText)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
s.thinking.WriteString(trimmed)
|
||||
delta["reasoning_content"] = trimmed
|
||||
}
|
||||
} else {
|
||||
trimmed := sse.TrimContinuationOverlap(s.text.String(), cleanedText)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
s.text.WriteString(trimmed)
|
||||
if !s.bufferToolContent {
|
||||
delta["content"] = trimmed
|
||||
} else {
|
||||
events := processToolSieveChunk(&s.toolSieve, trimmed, s.toolNames)
|
||||
for _, evt := range events {
|
||||
if len(evt.ToolCallDeltas) > 0 {
|
||||
if !s.emitEarlyToolDeltas {
|
||||
continue
|
||||
}
|
||||
filtered := filterIncrementalToolCallDeltasByAllowed(evt.ToolCallDeltas, s.streamToolNames)
|
||||
if len(filtered) == 0 {
|
||||
continue
|
||||
}
|
||||
formatted := formatIncrementalStreamToolCallDeltas(filtered, s.streamToolCallIDs)
|
||||
if len(formatted) == 0 {
|
||||
continue
|
||||
}
|
||||
tcDelta := map[string]any{
|
||||
"tool_calls": formatted,
|
||||
}
|
||||
s.toolCallsEmitted = true
|
||||
if !s.firstChunkSent {
|
||||
tcDelta["role"] = "assistant"
|
||||
s.firstChunkSent = true
|
||||
}
|
||||
newChoices = append(newChoices, openaifmt.BuildChatStreamDeltaChoice(0, tcDelta))
|
||||
continue
|
||||
}
|
||||
if len(evt.ToolCalls) > 0 {
|
||||
s.toolCallsEmitted = true
|
||||
s.toolCallsDoneEmitted = true
|
||||
tcDelta := map[string]any{
|
||||
"tool_calls": formatFinalStreamToolCallsWithStableIDs(evt.ToolCalls, s.streamToolCallIDs),
|
||||
}
|
||||
if !s.firstChunkSent {
|
||||
tcDelta["role"] = "assistant"
|
||||
s.firstChunkSent = true
|
||||
}
|
||||
newChoices = append(newChoices, openaifmt.BuildChatStreamDeltaChoice(0, tcDelta))
|
||||
s.resetStreamToolCallState()
|
||||
continue
|
||||
}
|
||||
if evt.Content != "" {
|
||||
cleaned := cleanVisibleOutput(evt.Content, s.stripReferenceMarkers)
|
||||
if cleaned == "" {
|
||||
continue
|
||||
}
|
||||
contentDelta := map[string]any{
|
||||
"content": cleaned,
|
||||
}
|
||||
if !s.firstChunkSent {
|
||||
contentDelta["role"] = "assistant"
|
||||
s.firstChunkSent = true
|
||||
}
|
||||
newChoices = append(newChoices, openaifmt.BuildChatStreamDeltaChoice(0, contentDelta))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(delta) > 0 {
|
||||
newChoices = append(newChoices, openaifmt.BuildChatStreamDeltaChoice(0, delta))
|
||||
}
|
||||
}
|
||||
|
||||
if len(newChoices) > 0 {
|
||||
s.sendChunk(openaifmt.BuildChatStreamChunk(s.completionID, s.created, s.model, newChoices, nil))
|
||||
}
|
||||
return streamengine.ParsedDecision{ContentSeen: contentSeen}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var citationMarkerPattern = regexp.MustCompile(`(?i)\[citation:\s*(\d+)\]`)
|
||||
|
||||
func replaceCitationMarkersWithLinks(text string, links map[int]string) string {
|
||||
if strings.TrimSpace(text) == "" || len(links) == 0 {
|
||||
return text
|
||||
}
|
||||
return citationMarkerPattern.ReplaceAllStringFunc(text, func(match string) string {
|
||||
sub := citationMarkerPattern.FindStringSubmatch(match)
|
||||
if len(sub) < 2 {
|
||||
return match
|
||||
}
|
||||
idx, err := strconv.Atoi(strings.TrimSpace(sub[1]))
|
||||
if err != nil || idx <= 0 {
|
||||
return match
|
||||
}
|
||||
url := strings.TrimSpace(links[idx])
|
||||
if url == "" {
|
||||
return match
|
||||
}
|
||||
return fmt.Sprintf("[%d](%s)", idx, url)
|
||||
})
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
package openai
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestReplaceCitationMarkersWithLinks(t *testing.T) {
|
||||
raw := "这是一条更新[citation:1],更多信息见[citation:2]。"
|
||||
links := map[int]string{
|
||||
1: "https://example.com/news-1",
|
||||
2: "https://example.com/news-2",
|
||||
}
|
||||
|
||||
got := replaceCitationMarkersWithLinks(raw, links)
|
||||
want := "这是一条更新[1](https://example.com/news-1),更多信息见[2](https://example.com/news-2)。"
|
||||
if got != want {
|
||||
t.Fatalf("expected %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplaceCitationMarkersWithLinksKeepsUnknownIndex(t *testing.T) {
|
||||
raw := "只有一个来源[citation:1],未知来源[citation:3]。"
|
||||
links := map[int]string{1: "https://example.com/a"}
|
||||
|
||||
got := replaceCitationMarkersWithLinks(raw, links)
|
||||
want := "只有一个来源[1](https://example.com/a),未知来源[citation:3]。"
|
||||
if got != want {
|
||||
t.Fatalf("expected %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
package openai
|
||||
|
||||
import "testing"
|
||||
|
||||
type mockOpenAIConfig struct {
|
||||
aliases map[string]string
|
||||
wideInput bool
|
||||
autoDeleteMode string
|
||||
toolMode string
|
||||
earlyEmit string
|
||||
responsesTTL int
|
||||
embedProv string
|
||||
historySplitEnabled bool
|
||||
historySplitTurns int
|
||||
}
|
||||
|
||||
func (m mockOpenAIConfig) ModelAliases() map[string]string { return m.aliases }
|
||||
func (m mockOpenAIConfig) CompatWideInputStrictOutput() bool {
|
||||
return m.wideInput
|
||||
}
|
||||
func (m mockOpenAIConfig) CompatStripReferenceMarkers() bool { return true }
|
||||
func (m mockOpenAIConfig) ToolcallMode() string { return m.toolMode }
|
||||
func (m mockOpenAIConfig) ToolcallEarlyEmitConfidence() string { return m.earlyEmit }
|
||||
func (m mockOpenAIConfig) ResponsesStoreTTLSeconds() int { return m.responsesTTL }
|
||||
func (m mockOpenAIConfig) EmbeddingsProvider() string { return m.embedProv }
|
||||
func (m mockOpenAIConfig) AutoDeleteMode() string {
|
||||
if m.autoDeleteMode == "" {
|
||||
return "none"
|
||||
}
|
||||
return m.autoDeleteMode
|
||||
}
|
||||
func (m mockOpenAIConfig) AutoDeleteSessions() bool { return false }
|
||||
func (m mockOpenAIConfig) HistorySplitEnabled() bool { return m.historySplitEnabled }
|
||||
func (m mockOpenAIConfig) HistorySplitTriggerAfterTurns() int {
|
||||
if m.historySplitTurns <= 0 {
|
||||
return 1
|
||||
}
|
||||
return m.historySplitTurns
|
||||
}
|
||||
|
||||
func TestNormalizeOpenAIChatRequestWithConfigInterface(t *testing.T) {
|
||||
cfg := mockOpenAIConfig{
|
||||
aliases: map[string]string{
|
||||
"my-model": "deepseek-chat-search",
|
||||
},
|
||||
wideInput: true,
|
||||
}
|
||||
req := map[string]any{
|
||||
"model": "my-model",
|
||||
"messages": []any{map[string]any{"role": "user", "content": "hello"}},
|
||||
}
|
||||
out, err := normalizeOpenAIChatRequest(cfg, req, "")
|
||||
if err != nil {
|
||||
t.Fatalf("normalizeOpenAIChatRequest error: %v", err)
|
||||
}
|
||||
if out.ResolvedModel != "deepseek-chat-search" {
|
||||
t.Fatalf("resolved model mismatch: got=%q", out.ResolvedModel)
|
||||
}
|
||||
if !out.Search || out.Thinking {
|
||||
t.Fatalf("unexpected model flags: thinking=%v search=%v", out.Thinking, out.Search)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeOpenAIResponsesRequestWideInputPolicyFromInterface(t *testing.T) {
|
||||
req := map[string]any{
|
||||
"model": "deepseek-chat",
|
||||
"input": "hi",
|
||||
}
|
||||
|
||||
_, err := normalizeOpenAIResponsesRequest(mockOpenAIConfig{
|
||||
aliases: map[string]string{},
|
||||
wideInput: false,
|
||||
}, req, "")
|
||||
if err == nil {
|
||||
t.Fatal("expected error when wide input is disabled and only input is provided")
|
||||
}
|
||||
|
||||
out, err := normalizeOpenAIResponsesRequest(mockOpenAIConfig{
|
||||
aliases: map[string]string{},
|
||||
wideInput: true,
|
||||
}, req, "")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when wide input is enabled: %v", err)
|
||||
}
|
||||
if out.Surface != "openai_responses" {
|
||||
t.Fatalf("unexpected surface: %q", out.Surface)
|
||||
}
|
||||
}
|
||||
@@ -1,104 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"ds2api/internal/auth"
|
||||
"ds2api/internal/deepseek"
|
||||
)
|
||||
|
||||
const openAIUploadMaxMemory = 32 << 20
|
||||
|
||||
func (h *Handler) UploadFile(w http.ResponseWriter, r *http.Request) {
|
||||
a, err := h.Auth.Determine(r)
|
||||
if err != nil {
|
||||
status := http.StatusUnauthorized
|
||||
detail := err.Error()
|
||||
if err == auth.ErrNoAccount {
|
||||
status = http.StatusTooManyRequests
|
||||
}
|
||||
writeOpenAIError(w, status, detail)
|
||||
return
|
||||
}
|
||||
defer h.Auth.Release(a)
|
||||
if !strings.HasPrefix(strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type"))), "multipart/form-data") {
|
||||
writeOpenAIError(w, http.StatusBadRequest, "content-type must be multipart/form-data")
|
||||
return
|
||||
}
|
||||
// Enforce a hard cap on the total request body size to prevent OOM
|
||||
r.Body = http.MaxBytesReader(w, r.Body, openAIUploadMaxSize)
|
||||
if err := r.ParseMultipartForm(openAIUploadMaxMemory); err != nil {
|
||||
if strings.Contains(strings.ToLower(err.Error()), "too large") {
|
||||
writeOpenAIError(w, http.StatusRequestEntityTooLarge, "file size exceeds limit")
|
||||
return
|
||||
}
|
||||
writeOpenAIError(w, http.StatusBadRequest, "invalid multipart form")
|
||||
return
|
||||
}
|
||||
if r.MultipartForm != nil {
|
||||
defer func() { _ = r.MultipartForm.RemoveAll() }()
|
||||
}
|
||||
r = r.WithContext(auth.WithAuth(r.Context(), a))
|
||||
file, header, err := r.FormFile("file")
|
||||
if err != nil {
|
||||
writeOpenAIError(w, http.StatusBadRequest, "file is required")
|
||||
return
|
||||
}
|
||||
defer func() { _ = file.Close() }()
|
||||
data, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
writeOpenAIError(w, http.StatusBadRequest, "failed to read uploaded file")
|
||||
return
|
||||
}
|
||||
contentType := strings.TrimSpace(header.Header.Get("Content-Type"))
|
||||
if contentType == "" && len(data) > 0 {
|
||||
contentType = http.DetectContentType(data)
|
||||
}
|
||||
result, err := h.DS.UploadFile(r.Context(), a, deepseek.UploadFileRequest{
|
||||
Filename: header.Filename,
|
||||
ContentType: contentType,
|
||||
Purpose: strings.TrimSpace(r.FormValue("purpose")),
|
||||
Data: data,
|
||||
}, 3)
|
||||
if err != nil {
|
||||
writeOpenAIError(w, http.StatusInternalServerError, "Failed to upload file.")
|
||||
return
|
||||
}
|
||||
if result != nil && result.AccountID == "" {
|
||||
result.AccountID = a.AccountID
|
||||
}
|
||||
writeJSON(w, http.StatusOK, buildOpenAIFileObject(result))
|
||||
}
|
||||
|
||||
func buildOpenAIFileObject(result *deepseek.UploadFileResult) map[string]any {
|
||||
if result == nil {
|
||||
obj := map[string]any{
|
||||
"id": "",
|
||||
"object": "file",
|
||||
"bytes": 0,
|
||||
"created_at": time.Now().Unix(),
|
||||
"filename": "",
|
||||
"purpose": "",
|
||||
"status": "uploaded",
|
||||
"status_details": nil,
|
||||
}
|
||||
return obj
|
||||
}
|
||||
obj := map[string]any{
|
||||
"id": result.ID,
|
||||
"object": "file",
|
||||
"bytes": result.Bytes,
|
||||
"created_at": time.Now().Unix(),
|
||||
"filename": result.Filename,
|
||||
"purpose": result.Purpose,
|
||||
"status": result.Status,
|
||||
"status_details": nil,
|
||||
}
|
||||
if result.AccountID != "" {
|
||||
obj["account_id"] = result.AccountID
|
||||
}
|
||||
return obj
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"ds2api/internal/auth"
|
||||
"ds2api/internal/chathistory"
|
||||
"ds2api/internal/config"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// openAIUploadMaxSize limits total multipart request body size (100 MiB).
|
||||
openAIUploadMaxSize = 100 << 20
|
||||
// openAIGeneralMaxSize limits total JSON request body size (100 MiB).
|
||||
openAIGeneralMaxSize = 100 << 20
|
||||
)
|
||||
|
||||
// writeJSON is a package-internal alias kept to avoid mass-renaming across
|
||||
// every call-site in this package.
|
||||
var writeJSON = util.WriteJSON
|
||||
|
||||
type Handler struct {
|
||||
Store ConfigReader
|
||||
Auth AuthResolver
|
||||
DS DeepSeekCaller
|
||||
ChatHistory *chathistory.Store
|
||||
|
||||
leaseMu sync.Mutex
|
||||
streamLeases map[string]streamLease
|
||||
responsesMu sync.Mutex
|
||||
responses *responseStore
|
||||
}
|
||||
|
||||
func (h *Handler) compatStripReferenceMarkers() bool {
|
||||
if h == nil || h.Store == nil {
|
||||
return true
|
||||
}
|
||||
return h.Store.CompatStripReferenceMarkers()
|
||||
}
|
||||
|
||||
type streamLease struct {
|
||||
Auth *auth.RequestAuth
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
func RegisterRoutes(r chi.Router, h *Handler) {
|
||||
r.Get("/v1/models", h.ListModels)
|
||||
r.Get("/v1/models/{model_id}", h.GetModel)
|
||||
r.Post("/v1/chat/completions", h.ChatCompletions)
|
||||
r.Post("/v1/responses", h.Responses)
|
||||
r.Get("/v1/responses/{response_id}", h.GetResponseByID)
|
||||
r.Post("/v1/files", h.UploadFile)
|
||||
r.Post("/v1/embeddings", h.Embeddings)
|
||||
}
|
||||
|
||||
func (h *Handler) ListModels(w http.ResponseWriter, _ *http.Request) {
|
||||
writeJSON(w, http.StatusOK, config.OpenAIModelsResponse())
|
||||
}
|
||||
|
||||
func (h *Handler) GetModel(w http.ResponseWriter, r *http.Request) {
|
||||
modelID := strings.TrimSpace(chi.URLParam(r, "model_id"))
|
||||
model, ok := config.OpenAIModelByID(h.Store, modelID)
|
||||
if !ok {
|
||||
writeOpenAIError(w, http.StatusNotFound, "Model not found.")
|
||||
return
|
||||
}
|
||||
writeJSON(w, http.StatusOK, model)
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"ds2api/internal/toolcall"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
func injectToolPrompt(messages []map[string]any, tools []any, policy util.ToolChoicePolicy) ([]map[string]any, []string) {
|
||||
if policy.IsNone() {
|
||||
return messages, nil
|
||||
}
|
||||
toolSchemas := make([]string, 0, len(tools))
|
||||
names := make([]string, 0, len(tools))
|
||||
isAllowed := func(name string) bool {
|
||||
if strings.TrimSpace(name) == "" {
|
||||
return false
|
||||
}
|
||||
if len(policy.Allowed) == 0 {
|
||||
return true
|
||||
}
|
||||
_, ok := policy.Allowed[name]
|
||||
return ok
|
||||
}
|
||||
|
||||
for _, t := range tools {
|
||||
tool, ok := t.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
fn, _ := tool["function"].(map[string]any)
|
||||
if len(fn) == 0 {
|
||||
fn = tool
|
||||
}
|
||||
name, _ := fn["name"].(string)
|
||||
desc, _ := fn["description"].(string)
|
||||
schema, _ := fn["parameters"].(map[string]any)
|
||||
name = strings.TrimSpace(name)
|
||||
if !isAllowed(name) {
|
||||
continue
|
||||
}
|
||||
names = append(names, name)
|
||||
if desc == "" {
|
||||
desc = "No description available"
|
||||
}
|
||||
b, _ := json.Marshal(schema)
|
||||
toolSchemas = append(toolSchemas, fmt.Sprintf("Tool: %s\nDescription: %s\nParameters: %s", name, desc, string(b)))
|
||||
}
|
||||
if len(toolSchemas) == 0 {
|
||||
return messages, names
|
||||
}
|
||||
toolPrompt := "You have access to these tools:\n\n" + strings.Join(toolSchemas, "\n\n") + "\n\n" + buildToolCallInstructions(names)
|
||||
if policy.Mode == util.ToolChoiceRequired {
|
||||
toolPrompt += "\n7) For this response, you MUST call at least one tool from the allowed list."
|
||||
}
|
||||
if policy.Mode == util.ToolChoiceForced && strings.TrimSpace(policy.ForcedName) != "" {
|
||||
toolPrompt += "\n7) For this response, you MUST call exactly this tool name: " + strings.TrimSpace(policy.ForcedName)
|
||||
toolPrompt += "\n8) Do not call any other tool."
|
||||
}
|
||||
|
||||
for i := range messages {
|
||||
if messages[i]["role"] == "system" {
|
||||
old, _ := messages[i]["content"].(string)
|
||||
messages[i]["content"] = strings.TrimSpace(old + "\n\n" + toolPrompt)
|
||||
return messages, names
|
||||
}
|
||||
}
|
||||
messages = append([]map[string]any{{"role": "system", "content": toolPrompt}}, messages...)
|
||||
return messages, names
|
||||
}
|
||||
|
||||
// buildToolCallInstructions delegates to the shared util implementation.
|
||||
func buildToolCallInstructions(toolNames []string) string {
|
||||
return toolcall.BuildToolCallInstructions(toolNames)
|
||||
}
|
||||
|
||||
func formatIncrementalStreamToolCallDeltas(deltas []toolCallDelta, ids map[int]string) []map[string]any {
|
||||
if len(deltas) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]map[string]any, 0, len(deltas))
|
||||
for _, d := range deltas {
|
||||
if d.Name == "" && d.Arguments == "" {
|
||||
continue
|
||||
}
|
||||
callID, ok := ids[d.Index]
|
||||
if !ok || callID == "" {
|
||||
callID = "call_" + strings.ReplaceAll(uuid.NewString(), "-", "")
|
||||
ids[d.Index] = callID
|
||||
}
|
||||
item := map[string]any{
|
||||
"index": d.Index,
|
||||
"id": callID,
|
||||
"type": "function",
|
||||
}
|
||||
fn := map[string]any{}
|
||||
if d.Name != "" {
|
||||
fn["name"] = d.Name
|
||||
}
|
||||
if d.Arguments != "" {
|
||||
fn["arguments"] = d.Arguments
|
||||
}
|
||||
if len(fn) > 0 {
|
||||
item["function"] = fn
|
||||
}
|
||||
out = append(out, item)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func filterIncrementalToolCallDeltasByAllowed(deltas []toolCallDelta, seenNames map[int]string) []toolCallDelta {
|
||||
if len(deltas) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]toolCallDelta, 0, len(deltas))
|
||||
for _, d := range deltas {
|
||||
if d.Name != "" {
|
||||
if seenNames != nil {
|
||||
seenNames[d.Index] = d.Name
|
||||
}
|
||||
out = append(out, d)
|
||||
continue
|
||||
}
|
||||
if seenNames == nil {
|
||||
out = append(out, d)
|
||||
continue
|
||||
}
|
||||
name := strings.TrimSpace(seenNames[d.Index])
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
out = append(out, d)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func formatFinalStreamToolCallsWithStableIDs(calls []toolcall.ParsedToolCall, ids map[int]string) []map[string]any {
|
||||
if len(calls) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]map[string]any, 0, len(calls))
|
||||
for i, c := range calls {
|
||||
callID := ""
|
||||
if ids != nil {
|
||||
callID = strings.TrimSpace(ids[i])
|
||||
}
|
||||
if callID == "" {
|
||||
callID = "call_" + strings.ReplaceAll(uuid.NewString(), "-", "")
|
||||
if ids != nil {
|
||||
ids[i] = callID
|
||||
}
|
||||
}
|
||||
args, _ := json.Marshal(c.Input)
|
||||
out = append(out, map[string]any{
|
||||
"index": i,
|
||||
"id": callID,
|
||||
"type": "function",
|
||||
"function": map[string]any{
|
||||
"name": c.Name,
|
||||
"arguments": string(args),
|
||||
},
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package openai
|
||||
|
||||
func (h *Handler) toolcallFeatureMatchEnabled() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *Handler) toolcallEarlyEmitHighConfidence() bool {
|
||||
return true
|
||||
}
|
||||
@@ -1,263 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func makeSSEHTTPResponse(lines ...string) *http.Response {
|
||||
body := strings.Join(lines, "\n")
|
||||
if !strings.HasSuffix(body, "\n") {
|
||||
body += "\n"
|
||||
}
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader(body)),
|
||||
}
|
||||
}
|
||||
|
||||
func decodeJSONBody(t *testing.T, body string) map[string]any {
|
||||
t.Helper()
|
||||
var out map[string]any
|
||||
if err := json.Unmarshal([]byte(body), &out); err != nil {
|
||||
t.Fatalf("decode json failed: %v, body=%s", err, body)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func parseSSEDataFrames(t *testing.T, body string) ([]map[string]any, bool) {
|
||||
t.Helper()
|
||||
lines := strings.Split(body, "\n")
|
||||
frames := make([]map[string]any, 0, len(lines))
|
||||
done := false
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if !strings.HasPrefix(line, "data:") {
|
||||
continue
|
||||
}
|
||||
payload := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
|
||||
if payload == "" {
|
||||
continue
|
||||
}
|
||||
if payload == "[DONE]" {
|
||||
done = true
|
||||
continue
|
||||
}
|
||||
var frame map[string]any
|
||||
if err := json.Unmarshal([]byte(payload), &frame); err != nil {
|
||||
t.Fatalf("decode sse frame failed: %v, payload=%s", err, payload)
|
||||
}
|
||||
frames = append(frames, frame)
|
||||
}
|
||||
return frames, done
|
||||
}
|
||||
|
||||
func streamHasToolCallsDelta(frames []map[string]any) bool {
|
||||
for _, frame := range frames {
|
||||
choices, _ := frame["choices"].([]any)
|
||||
for _, item := range choices {
|
||||
choice, _ := item.(map[string]any)
|
||||
delta, _ := choice["delta"].(map[string]any)
|
||||
if _, ok := delta["tool_calls"]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func streamFinishReason(frames []map[string]any) string {
|
||||
for _, frame := range frames {
|
||||
choices, _ := frame["choices"].([]any)
|
||||
for _, item := range choices {
|
||||
choice, _ := item.(map[string]any)
|
||||
if reason, ok := choice["finish_reason"].(string); ok && reason != "" {
|
||||
return reason
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Backward-compatible alias for historical test name used in CI logs.
|
||||
func TestHandleNonStreamReturns429WhenUpstreamOutputEmpty(t *testing.T) {
|
||||
h := &Handler{}
|
||||
resp := makeSSEHTTPResponse(
|
||||
`data: {"p":"response/content","v":""}`,
|
||||
`data: [DONE]`,
|
||||
)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
h.handleNonStream(rec, resp, "cid-empty", "deepseek-chat", "prompt", false, false, nil, nil)
|
||||
if rec.Code != http.StatusTooManyRequests {
|
||||
t.Fatalf("expected status 429 for empty upstream output, got %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
out := decodeJSONBody(t, rec.Body.String())
|
||||
errObj, _ := out["error"].(map[string]any)
|
||||
if asString(errObj["code"]) != "upstream_empty_output" {
|
||||
t.Fatalf("expected code=upstream_empty_output, got %#v", out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleNonStreamReturnsContentFilterErrorWhenUpstreamFilteredWithoutOutput(t *testing.T) {
|
||||
h := &Handler{}
|
||||
resp := makeSSEHTTPResponse(
|
||||
`data: {"code":"content_filter"}`,
|
||||
`data: [DONE]`,
|
||||
)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
h.handleNonStream(rec, resp, "cid-empty-filtered", "deepseek-chat", "prompt", false, false, nil, nil)
|
||||
if rec.Code != http.StatusBadRequest {
|
||||
t.Fatalf("expected status 400 for filtered upstream output, got %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
out := decodeJSONBody(t, rec.Body.String())
|
||||
errObj, _ := out["error"].(map[string]any)
|
||||
if asString(errObj["code"]) != "content_filter" {
|
||||
t.Fatalf("expected code=content_filter, got %#v", out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleNonStreamReturns429WhenUpstreamHasOnlyThinking(t *testing.T) {
|
||||
h := &Handler{}
|
||||
resp := makeSSEHTTPResponse(
|
||||
`data: {"p":"response/thinking_content","v":"Only thinking"}`,
|
||||
`data: [DONE]`,
|
||||
)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
h.handleNonStream(rec, resp, "cid-thinking-only", "deepseek-reasoner", "prompt", true, false, nil, nil)
|
||||
if rec.Code != http.StatusTooManyRequests {
|
||||
t.Fatalf("expected status 429 for thinking-only upstream output, got %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
out := decodeJSONBody(t, rec.Body.String())
|
||||
errObj, _ := out["error"].(map[string]any)
|
||||
if asString(errObj["code"]) != "upstream_empty_output" {
|
||||
t.Fatalf("expected code=upstream_empty_output, got %#v", out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleStreamToolsPlainTextStreamsBeforeFinish(t *testing.T) {
|
||||
h := &Handler{}
|
||||
resp := makeSSEHTTPResponse(
|
||||
`data: {"p":"response/content","v":"你好,"}`,
|
||||
`data: {"p":"response/content","v":"这是普通文本回复。"}`,
|
||||
`data: [DONE]`,
|
||||
)
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", nil)
|
||||
|
||||
h.handleStream(rec, req, resp, "cid6", "deepseek-chat", "prompt", false, false, []string{"search"}, nil)
|
||||
|
||||
frames, done := parseSSEDataFrames(t, rec.Body.String())
|
||||
if !done {
|
||||
t.Fatalf("expected [DONE], body=%s", rec.Body.String())
|
||||
}
|
||||
if streamHasToolCallsDelta(frames) {
|
||||
t.Fatalf("did not expect tool_calls delta for plain text: %s", rec.Body.String())
|
||||
}
|
||||
content := strings.Builder{}
|
||||
for _, frame := range frames {
|
||||
choices, _ := frame["choices"].([]any)
|
||||
for _, item := range choices {
|
||||
choice, _ := item.(map[string]any)
|
||||
delta, _ := choice["delta"].(map[string]any)
|
||||
if c, ok := delta["content"].(string); ok {
|
||||
content.WriteString(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
if got := content.String(); got == "" {
|
||||
t.Fatalf("expected streamed content in tool mode plain text, body=%s", rec.Body.String())
|
||||
}
|
||||
if streamFinishReason(frames) != "stop" {
|
||||
t.Fatalf("expected finish_reason=stop, body=%s", rec.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleStreamIncompleteCapturedToolJSONFlushesAsTextOnFinalize(t *testing.T) {
|
||||
h := &Handler{}
|
||||
resp := makeSSEHTTPResponse(
|
||||
`data: {"p":"response/content","v":"{\"tool_calls\":[{\"name\":\"search\""}`,
|
||||
`data: [DONE]`,
|
||||
)
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", nil)
|
||||
|
||||
h.handleStream(rec, req, resp, "cid10", "deepseek-chat", "prompt", false, false, []string{"search"}, nil)
|
||||
|
||||
frames, done := parseSSEDataFrames(t, rec.Body.String())
|
||||
if !done {
|
||||
t.Fatalf("expected [DONE], body=%s", rec.Body.String())
|
||||
}
|
||||
if streamHasToolCallsDelta(frames) {
|
||||
t.Fatalf("did not expect tool_calls delta for incomplete json, body=%s", rec.Body.String())
|
||||
}
|
||||
content := strings.Builder{}
|
||||
for _, frame := range frames {
|
||||
choices, _ := frame["choices"].([]any)
|
||||
for _, item := range choices {
|
||||
choice, _ := item.(map[string]any)
|
||||
delta, _ := choice["delta"].(map[string]any)
|
||||
if c, ok := delta["content"].(string); ok {
|
||||
content.WriteString(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !strings.Contains(strings.ToLower(content.String()), "tool_calls") || !strings.Contains(content.String(), "{") {
|
||||
t.Fatalf("expected incomplete capture to flush as plain text instead of stalling, got=%q", content.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleStreamEmitsDistinctToolCallIDsAcrossSeparateToolBlocks(t *testing.T) {
|
||||
h := &Handler{}
|
||||
resp := makeSSEHTTPResponse(
|
||||
`data: {"p":"response/content","v":"前置文本\n<tool_calls>\n <tool_call>\n <tool_name>read_file</tool_name>\n <parameters>{\"path\":\"README.MD\"}</parameters>\n </tool_call>\n</tool_calls>"}`,
|
||||
`data: {"p":"response/content","v":"中间文本\n<tool_calls>\n <tool_call>\n <tool_name>search</tool_name>\n <parameters>{\"q\":\"golang\"}</parameters>\n </tool_call>\n</tool_calls>"}`,
|
||||
`data: [DONE]`,
|
||||
)
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", nil)
|
||||
|
||||
h.handleStream(rec, req, resp, "cid-multi", "deepseek-chat", "prompt", false, false, []string{"read_file", "search"}, nil)
|
||||
|
||||
frames, done := parseSSEDataFrames(t, rec.Body.String())
|
||||
if !done {
|
||||
t.Fatalf("expected [DONE], body=%s", rec.Body.String())
|
||||
}
|
||||
|
||||
ids := make([]string, 0, 2)
|
||||
seen := make(map[string]struct{})
|
||||
for _, frame := range frames {
|
||||
choices, _ := frame["choices"].([]any)
|
||||
for _, item := range choices {
|
||||
choice, _ := item.(map[string]any)
|
||||
delta, _ := choice["delta"].(map[string]any)
|
||||
toolCalls, _ := delta["tool_calls"].([]any)
|
||||
for _, rawCall := range toolCalls {
|
||||
call, _ := rawCall.(map[string]any)
|
||||
id := asString(call["id"])
|
||||
if id == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[id]; ok {
|
||||
continue
|
||||
}
|
||||
seen[id] = struct{}{}
|
||||
ids = append(ids, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(ids) != 2 {
|
||||
t.Fatalf("expected two distinct tool call ids, got %#v body=%s", ids, rec.Body.String())
|
||||
}
|
||||
if ids[0] == ids[1] {
|
||||
t.Fatalf("expected distinct tool call ids across blocks, got %#v body=%s", ids, rec.Body.String())
|
||||
}
|
||||
}
|
||||
@@ -1,289 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"ds2api/internal/auth"
|
||||
"ds2api/internal/deepseek"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
const (
|
||||
historySplitFilename = "HISTORY.txt"
|
||||
historySplitContentType = "text/plain; charset=utf-8"
|
||||
historySplitPurpose = "assistants"
|
||||
)
|
||||
|
||||
func (h *Handler) applyHistorySplit(ctx context.Context, a *auth.RequestAuth, stdReq util.StandardRequest) (util.StandardRequest, error) {
|
||||
if h == nil || h.DS == nil || h.Store == nil || a == nil {
|
||||
return stdReq, nil
|
||||
}
|
||||
if !h.Store.HistorySplitEnabled() {
|
||||
return stdReq, nil
|
||||
}
|
||||
|
||||
promptMessages, historyMessages := splitOpenAIHistoryMessages(stdReq.Messages, h.Store.HistorySplitTriggerAfterTurns())
|
||||
if len(historyMessages) == 0 {
|
||||
return stdReq, nil
|
||||
}
|
||||
|
||||
reasoningContent := extractHistorySplitReasoningContent(historyMessages)
|
||||
historyText := buildOpenAIHistoryTranscript(historyMessages)
|
||||
if strings.TrimSpace(historyText) == "" {
|
||||
return stdReq, errors.New("history split produced empty transcript")
|
||||
}
|
||||
|
||||
result, err := h.DS.UploadFile(ctx, a, deepseek.UploadFileRequest{
|
||||
Filename: historySplitFilename,
|
||||
ContentType: historySplitContentType,
|
||||
Purpose: historySplitPurpose,
|
||||
Data: []byte(historyText),
|
||||
}, 3)
|
||||
if err != nil {
|
||||
return stdReq, fmt.Errorf("upload history file: %w", err)
|
||||
}
|
||||
fileID := strings.TrimSpace(result.ID)
|
||||
if fileID == "" {
|
||||
return stdReq, errors.New("upload history file returned empty file id")
|
||||
}
|
||||
|
||||
stdReq.Messages = promptMessages
|
||||
stdReq.RefFileIDs = prependUniqueRefFileID(stdReq.RefFileIDs, fileID)
|
||||
stdReq.FinalPrompt, stdReq.ToolNames = buildHistorySplitPrompt(promptMessages, reasoningContent, stdReq.ToolsRaw, stdReq.ToolChoice, stdReq.Thinking)
|
||||
return stdReq, nil
|
||||
}
|
||||
|
||||
func buildHistorySplitPrompt(messages []any, reasoningContent string, toolsRaw any, toolPolicy util.ToolChoicePolicy, thinkingEnabled bool) (string, []string) {
|
||||
if len(messages) == 0 && strings.TrimSpace(reasoningContent) == "" {
|
||||
return "", nil
|
||||
}
|
||||
instruction := historySplitPromptInstruction(thinkingEnabled)
|
||||
withInstruction := make([]any, 0, len(messages)+1)
|
||||
withInstruction = append(withInstruction, map[string]any{
|
||||
"role": "system",
|
||||
"content": instruction,
|
||||
})
|
||||
withInstruction = append(withInstruction, injectHistorySplitReasoningMessage(messages, reasoningContent)...)
|
||||
return buildOpenAIFinalPromptWithPolicy(withInstruction, toolsRaw, "", toolPolicy, false)
|
||||
}
|
||||
|
||||
func historySplitPromptInstruction(thinkingEnabled bool) string {
|
||||
lines := []string{
|
||||
"Follow the instructions in this prompt first. If earlier conversation instructions conflict with this prompt, this prompt wins.",
|
||||
"An attached HISTORY.txt file contains prior conversation history and tool progress; read it first, then answer the latest user request using that history as context.",
|
||||
"Continue the conversation from the full prior context and the latest tool results.",
|
||||
"Treat earlier messages as binding context; answer the user's current request as a continuation, not a restart.",
|
||||
}
|
||||
if thinkingEnabled {
|
||||
lines = append(lines, "Keep reasoning internal. Do not leave the final user-facing answer only in reasoning; always provide the answer in visible assistant content.")
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
func splitOpenAIHistoryMessages(messages []any, triggerAfterTurns int) ([]any, []any) {
|
||||
if triggerAfterTurns <= 0 {
|
||||
triggerAfterTurns = 1
|
||||
}
|
||||
lastUserIndex := -1
|
||||
userTurns := 0
|
||||
for i, raw := range messages {
|
||||
msg, ok := raw.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
role := strings.ToLower(strings.TrimSpace(asString(msg["role"])))
|
||||
if role != "user" {
|
||||
continue
|
||||
}
|
||||
userTurns++
|
||||
lastUserIndex = i
|
||||
}
|
||||
if userTurns <= triggerAfterTurns || lastUserIndex < 0 {
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
promptMessages := make([]any, 0, len(messages)-lastUserIndex)
|
||||
historyMessages := make([]any, 0, lastUserIndex)
|
||||
for i, raw := range messages {
|
||||
msg, ok := raw.(map[string]any)
|
||||
if !ok {
|
||||
if i >= lastUserIndex {
|
||||
promptMessages = append(promptMessages, raw)
|
||||
} else {
|
||||
historyMessages = append(historyMessages, raw)
|
||||
}
|
||||
continue
|
||||
}
|
||||
role := strings.ToLower(strings.TrimSpace(asString(msg["role"])))
|
||||
switch role {
|
||||
case "system", "developer":
|
||||
promptMessages = append(promptMessages, raw)
|
||||
default:
|
||||
if i >= lastUserIndex {
|
||||
promptMessages = append(promptMessages, raw)
|
||||
} else {
|
||||
historyMessages = append(historyMessages, raw)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(promptMessages) == 0 {
|
||||
return messages, nil
|
||||
}
|
||||
return promptMessages, historyMessages
|
||||
}
|
||||
|
||||
func buildOpenAIHistoryTranscript(messages []any) string {
|
||||
var b strings.Builder
|
||||
b.WriteString("# HISTORY.txt\n")
|
||||
b.WriteString("Prior conversation history and tool progress.\n\n")
|
||||
|
||||
entry := 0
|
||||
for _, raw := range messages {
|
||||
msg, ok := raw.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
role := strings.ToLower(strings.TrimSpace(asString(msg["role"])))
|
||||
content := buildOpenAIHistoryEntry(role, msg)
|
||||
if strings.TrimSpace(content) == "" {
|
||||
continue
|
||||
}
|
||||
entry++
|
||||
fmt.Fprintf(&b, "=== %d. %s ===\n%s\n\n", entry, strings.ToUpper(roleLabelForHistory(role)), content)
|
||||
}
|
||||
return strings.TrimSpace(b.String()) + "\n"
|
||||
}
|
||||
|
||||
func buildOpenAIHistoryEntry(role string, msg map[string]any) string {
|
||||
switch role {
|
||||
case "assistant":
|
||||
return strings.TrimSpace(buildAssistantHistoryContent(msg))
|
||||
case "tool", "function":
|
||||
return strings.TrimSpace(buildToolHistoryContent(msg))
|
||||
case "user":
|
||||
return strings.TrimSpace(normalizeOpenAIContentForPrompt(msg["content"]))
|
||||
default:
|
||||
return strings.TrimSpace(normalizeOpenAIContentForPrompt(msg["content"]))
|
||||
}
|
||||
}
|
||||
|
||||
func buildAssistantHistoryContent(msg map[string]any) string {
|
||||
return strings.TrimSpace(buildAssistantContentForPrompt(msg))
|
||||
}
|
||||
|
||||
func buildToolHistoryContent(msg map[string]any) string {
|
||||
content := strings.TrimSpace(normalizeOpenAIContentForPrompt(msg["content"]))
|
||||
parts := make([]string, 0, 2)
|
||||
if name := strings.TrimSpace(asString(msg["name"])); name != "" {
|
||||
parts = append(parts, "name="+name)
|
||||
}
|
||||
if callID := strings.TrimSpace(asString(msg["tool_call_id"])); callID != "" {
|
||||
parts = append(parts, "tool_call_id="+callID)
|
||||
}
|
||||
header := ""
|
||||
if len(parts) > 0 {
|
||||
header = "[" + strings.Join(parts, " ") + "]"
|
||||
}
|
||||
switch {
|
||||
case header != "" && content != "":
|
||||
return header + "\n" + content
|
||||
case header != "":
|
||||
return header
|
||||
default:
|
||||
return content
|
||||
}
|
||||
}
|
||||
|
||||
func extractHistorySplitReasoningContent(messages []any) string {
|
||||
for i := len(messages) - 1; i >= 0; i-- {
|
||||
msg, ok := messages[i].(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
role := strings.ToLower(strings.TrimSpace(asString(msg["role"])))
|
||||
if role != "assistant" {
|
||||
continue
|
||||
}
|
||||
reasoning := strings.TrimSpace(normalizeOpenAIReasoningContentForPrompt(msg["reasoning_content"]))
|
||||
if reasoning == "" {
|
||||
reasoning = strings.TrimSpace(extractOpenAIReasoningContentFromMessage(msg["content"]))
|
||||
}
|
||||
if reasoning != "" {
|
||||
return reasoning
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func injectHistorySplitReasoningMessage(messages []any, reasoningContent string) []any {
|
||||
reasoningContent = strings.TrimSpace(reasoningContent)
|
||||
if reasoningContent == "" {
|
||||
return messages
|
||||
}
|
||||
reasoningMsg := map[string]any{
|
||||
"role": "assistant",
|
||||
"content": "",
|
||||
"reasoning_content": reasoningContent,
|
||||
}
|
||||
lastUserIndex := lastOpenAIUserMessageIndex(messages)
|
||||
if lastUserIndex < 0 {
|
||||
out := make([]any, 0, len(messages)+1)
|
||||
out = append(out, reasoningMsg)
|
||||
out = append(out, messages...)
|
||||
return out
|
||||
}
|
||||
out := make([]any, 0, len(messages)+1)
|
||||
for i, raw := range messages {
|
||||
if i == lastUserIndex {
|
||||
out = append(out, reasoningMsg)
|
||||
}
|
||||
out = append(out, raw)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func lastOpenAIUserMessageIndex(messages []any) int {
|
||||
last := -1
|
||||
for i, raw := range messages {
|
||||
msg, ok := raw.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if strings.ToLower(strings.TrimSpace(asString(msg["role"]))) == "user" {
|
||||
last = i
|
||||
}
|
||||
}
|
||||
return last
|
||||
}
|
||||
|
||||
// roleLabelForHistory maps a raw message role onto the label shown in the
// history transcript: "function" collapses into "tool", a blank role becomes
// "unknown", and everything else passes through lowercased and trimmed.
func roleLabelForHistory(role string) string {
	normalized := strings.ToLower(strings.TrimSpace(role))
	if normalized == "function" {
		return "tool"
	}
	if normalized == "" {
		return "unknown"
	}
	return normalized
}
|
||||
|
||||
// prependUniqueRefFileID returns a new slice with fileID first, followed by
// the existing ids with blanks and case-insensitive duplicates of fileID
// removed. Remaining ids are whitespace-trimmed. A blank fileID returns the
// existing slice unchanged.
func prependUniqueRefFileID(existing []string, fileID string) []string {
	fileID = strings.TrimSpace(fileID)
	if fileID == "" {
		return existing
	}
	result := make([]string, 1, len(existing)+1)
	result[0] = fileID
	for _, candidate := range existing {
		candidate = strings.TrimSpace(candidate)
		if candidate == "" || strings.EqualFold(candidate, fileID) {
			continue
		}
		result = append(result, candidate)
	}
	return result
}
|
||||
@@ -1,353 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"ds2api/internal/auth"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
// historySplitTestMessages builds the canonical multi-turn fixture shared by
// the history-split tests: a system message, a first user turn, an assistant
// turn with hidden reasoning plus a tool call, the tool result, and the
// latest user turn.
func historySplitTestMessages() []any {
	assistantTurn := map[string]any{
		"role":              "assistant",
		"content":           "",
		"reasoning_content": "hidden reasoning",
		"tool_calls": []any{
			map[string]any{
				"name":      "search",
				"arguments": map[string]any{"query": "docs"},
			},
		},
	}
	toolTurn := map[string]any{
		"role":         "tool",
		"name":         "search",
		"tool_call_id": "call-1",
		"content":      "tool result",
	}
	return []any{
		map[string]any{"role": "system", "content": "system instructions"},
		map[string]any{"role": "user", "content": "first user turn"},
		assistantTurn,
		toolTurn,
		map[string]any{"role": "user", "content": "latest user turn"},
	}
}
|
||||
|
||||
// TestBuildOpenAIHistoryTranscriptPreservesOrderAndToolHistory verifies that
// splitting with triggerAfterTurns=1 keeps only the system message and the
// latest user turn in the prompt, renders the remaining three turns into the
// HISTORY.txt transcript in USER -> ASSISTANT -> TOOL order, and carries the
// assistant's hidden reasoning into the rebuilt final prompt.
func TestBuildOpenAIHistoryTranscriptPreservesOrderAndToolHistory(t *testing.T) {
	promptMessages, historyMessages := splitOpenAIHistoryMessages(historySplitTestMessages(), 1)
	if len(promptMessages) != 2 {
		t.Fatalf("expected 2 prompt messages, got %d", len(promptMessages))
	}
	if len(historyMessages) != 3 {
		t.Fatalf("expected 3 history messages, got %d", len(historyMessages))
	}

	// The transcript must retain the older user turn, the assistant tool
	// calls, the tool-call id, and the reasoning block.
	transcript := buildOpenAIHistoryTranscript(historyMessages)
	if !strings.Contains(transcript, "first user turn") {
		t.Fatalf("expected user history in transcript, got %s", transcript)
	}
	if !strings.Contains(transcript, "<tool_calls>") {
		t.Fatalf("expected assistant tool_calls in transcript, got %s", transcript)
	}
	if !strings.Contains(transcript, "tool_call_id=call-1") {
		t.Fatalf("expected tool call id in transcript, got %s", transcript)
	}
	if !strings.Contains(transcript, "[reasoning_content]") {
		t.Fatalf("expected reasoning block in HISTORY.txt, got %s", transcript)
	}
	if !strings.Contains(transcript, "hidden reasoning") {
		t.Fatalf("expected reasoning text in HISTORY.txt, got %s", transcript)
	}

	// Section headers are numbered in original conversation order.
	userIdx := strings.Index(transcript, "=== 1. USER ===")
	assistantIdx := strings.Index(transcript, "=== 2. ASSISTANT ===")
	toolIdx := strings.Index(transcript, "=== 3. TOOL ===")
	if userIdx < 0 || assistantIdx < 0 || toolIdx < 0 {
		t.Fatalf("expected ordered role sections, got %s", transcript)
	}
	if userIdx >= assistantIdx || assistantIdx >= toolIdx {
		t.Fatalf("expected USER -> ASSISTANT -> TOOL order, got %s", transcript)
	}
	// Within the assistant entry, reasoning must precede the tool calls.
	if reasoningIdx := strings.Index(transcript, "[reasoning_content]"); reasoningIdx < 0 || reasoningIdx > strings.Index(transcript, "<tool_calls>") {
		t.Fatalf("expected reasoning block before tool calls, got %s", transcript)
	}
	reasoning := extractHistorySplitReasoningContent(historyMessages)
	if reasoning != "hidden reasoning" {
		t.Fatalf("expected latest assistant reasoning to be extracted, got %q", reasoning)
	}

	// The rebuilt final prompt keeps the latest turn, drops the history, and
	// attaches the extracted reasoning plus the history-file instructions.
	finalPrompt, _ := buildHistorySplitPrompt(promptMessages, reasoning, nil, util.DefaultToolChoicePolicy(), false)
	if !strings.Contains(finalPrompt, "latest user turn") {
		t.Fatalf("expected latest user turn in final prompt, got %s", finalPrompt)
	}
	if strings.Contains(finalPrompt, "first user turn") {
		t.Fatalf("expected earlier history to be removed from final prompt, got %s", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "[reasoning_content]") || !strings.Contains(finalPrompt, "hidden reasoning") {
		t.Fatalf("expected latest assistant reasoning to be attached to prompt, got %s", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "HISTORY.txt") {
		t.Fatalf("expected history instruction in final prompt, got %s", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "Follow the instructions in this prompt first") {
		t.Fatalf("expected stronger prompt override in final prompt, got %s", finalPrompt)
	}
	if strings.Index(finalPrompt, "Follow the instructions in this prompt first") > strings.Index(finalPrompt, "Continue the conversation") {
		t.Fatalf("expected history split instruction before continuity instructions, got %s", finalPrompt)
	}
}
|
||||
|
||||
// TestSplitOpenAIHistoryMessagesUsesLatestUserTurn verifies that with
// multiple user turns the split boundary is the LAST user message: earlier
// turns (including the middle user/assistant exchange) move into the
// HISTORY.txt transcript while only the latest turn stays in the prompt.
func TestSplitOpenAIHistoryMessagesUsesLatestUserTurn(t *testing.T) {
	toolCalls := []any{
		map[string]any{
			"name":      "search",
			"arguments": map[string]any{"query": "docs"},
		},
	}
	// Three user turns; only "latest user turn" should survive in the prompt.
	messages := []any{
		map[string]any{"role": "system", "content": "system instructions"},
		map[string]any{"role": "user", "content": "first user turn"},
		map[string]any{
			"role":       "assistant",
			"content":    "",
			"tool_calls": toolCalls,
		},
		map[string]any{
			"role":         "tool",
			"name":         "search",
			"tool_call_id": "call-1",
			"content":      "tool result",
		},
		map[string]any{"role": "user", "content": "middle user turn"},
		map[string]any{
			"role":    "assistant",
			"content": "middle assistant turn",
		},
		map[string]any{"role": "user", "content": "latest user turn"},
	}

	promptMessages, historyMessages := splitOpenAIHistoryMessages(messages, 1)
	if len(promptMessages) == 0 || len(historyMessages) == 0 {
		t.Fatalf("expected both prompt and history messages, got prompt=%d history=%d", len(promptMessages), len(historyMessages))
	}
	// This fixture has no reasoning_content anywhere.
	reasoning := extractHistorySplitReasoningContent(historyMessages)
	if reasoning != "" {
		t.Fatalf("expected no reasoning in this fixture, got %q", reasoning)
	}

	promptText, _ := buildHistorySplitPrompt(promptMessages, reasoning, nil, util.DefaultToolChoicePolicy(), false)
	if !strings.Contains(promptText, "latest user turn") {
		t.Fatalf("expected latest user turn in prompt, got %s", promptText)
	}
	if strings.Contains(promptText, "middle user turn") {
		t.Fatalf("expected middle user turn to be split into history, got %s", promptText)
	}

	// The middle turn lands in the transcript, the latest turn does not.
	historyText := buildOpenAIHistoryTranscript(historyMessages)
	if !strings.Contains(historyText, "middle user turn") {
		t.Fatalf("expected middle user turn in HISTORY.txt, got %s", historyText)
	}
	if strings.Contains(historyText, "latest user turn") {
		t.Fatalf("expected latest user turn to remain in prompt, got %s", historyText)
	}
}
|
||||
|
||||
// TestApplyHistorySplitSkipsFirstTurn verifies that a single-turn
// conversation is below the split threshold (historySplitTurns=1): no
// HISTORY.txt upload happens and the normalized request passes through
// with its prompt and ref file ids unchanged.
func TestApplyHistorySplitSkipsFirstTurn(t *testing.T) {
	ds := &inlineUploadDSStub{}
	h := &Handler{
		Store: mockOpenAIConfig{
			wideInput:           true,
			historySplitEnabled: true,
			historySplitTurns:   1,
		},
		DS: ds,
	}
	// Only one user turn — not enough to trigger the split.
	req := map[string]any{
		"model": "deepseek-chat",
		"messages": []any{
			map[string]any{"role": "user", "content": "hello"},
		},
	}
	stdReq, err := normalizeOpenAIChatRequest(h.Store, req, "")
	if err != nil {
		t.Fatalf("normalize failed: %v", err)
	}

	out, err := h.applyHistorySplit(context.Background(), &auth.RequestAuth{DeepSeekToken: "token"}, stdReq)
	if err != nil {
		t.Fatalf("apply history split failed: %v", err)
	}
	if len(ds.uploadCalls) != 0 {
		t.Fatalf("expected no upload on first turn, got %d", len(ds.uploadCalls))
	}
	if out.FinalPrompt != stdReq.FinalPrompt {
		t.Fatalf("expected prompt unchanged on first turn")
	}
	if len(out.RefFileIDs) != len(stdReq.RefFileIDs) {
		t.Fatalf("expected ref files unchanged on first turn")
	}
}
|
||||
|
||||
// TestChatCompletionsHistorySplitUploadsHistoryAndKeepsLatestPrompt drives a
// full /v1/chat/completions request through the handler with history split
// enabled and verifies the end-to-end contract: exactly one HISTORY.txt
// upload (plain text, purpose "assistants") containing only the older turns,
// a completion prompt that keeps the latest turn plus the extracted
// reasoning and history instructions, and the uploaded file id placed first
// in ref_file_ids.
func TestChatCompletionsHistorySplitUploadsHistoryAndKeepsLatestPrompt(t *testing.T) {
	ds := &inlineUploadDSStub{}
	h := &Handler{
		Store: mockOpenAIConfig{
			wideInput:           true,
			historySplitEnabled: true,
			historySplitTurns:   1,
		},
		Auth: streamStatusAuthStub{},
		DS:   ds,
	}
	reqBody, _ := json.Marshal(map[string]any{
		"model":    "deepseek-chat",
		"messages": historySplitTestMessages(),
		"stream":   false,
	})
	req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", strings.NewReader(string(reqBody)))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()

	h.ChatCompletions(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	// Exactly one upload with the expected file metadata.
	if len(ds.uploadCalls) != 1 {
		t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls))
	}
	upload := ds.uploadCalls[0]
	if upload.Filename != "HISTORY.txt" {
		t.Fatalf("unexpected upload filename: %q", upload.Filename)
	}
	if upload.ContentType != "text/plain; charset=utf-8" {
		t.Fatalf("unexpected content type: %q", upload.ContentType)
	}
	if upload.Purpose != "assistants" {
		t.Fatalf("unexpected purpose: %q", upload.Purpose)
	}
	// Uploaded transcript holds the old turns but not the latest one.
	historyText := string(upload.Data)
	if !strings.Contains(historyText, "first user turn") || !strings.Contains(historyText, "tool result") {
		t.Fatalf("expected older turns in HISTORY.txt, got %s", historyText)
	}
	if strings.Contains(historyText, "latest user turn") {
		t.Fatalf("expected latest turn to remain in prompt, got %s", historyText)
	}
	if ds.completionReq == nil {
		t.Fatal("expected completion payload to be captured")
	}
	// Completion prompt: latest turn only, plus reasoning and instructions.
	promptText, _ := ds.completionReq["prompt"].(string)
	if !strings.Contains(promptText, "latest user turn") {
		t.Fatalf("expected latest turn in completion prompt, got %s", promptText)
	}
	if strings.Contains(promptText, "first user turn") {
		t.Fatalf("expected historical turns removed from completion prompt, got %s", promptText)
	}
	if !strings.Contains(promptText, "[reasoning_content]") || !strings.Contains(promptText, "hidden reasoning") {
		t.Fatalf("expected latest assistant reasoning to be attached to completion prompt, got %s", promptText)
	}
	if !strings.Contains(promptText, "HISTORY.txt") {
		t.Fatalf("expected history instruction in completion prompt, got %s", promptText)
	}
	if !strings.Contains(promptText, "Follow the instructions in this prompt first") {
		t.Fatalf("expected stronger prompt override in completion prompt, got %s", promptText)
	}
	if strings.Index(promptText, "Follow the instructions in this prompt first") > strings.Index(promptText, "Continue the conversation") {
		t.Fatalf("expected history split instruction before continuity instructions, got %s", promptText)
	}
	// The freshly uploaded file id must lead ref_file_ids.
	refIDs, _ := ds.completionReq["ref_file_ids"].([]any)
	if len(refIDs) == 0 || refIDs[0] != "file-inline-1" {
		t.Fatalf("expected uploaded history file to be first ref_file_id, got %#v", ds.completionReq["ref_file_ids"])
	}
}
|
||||
|
||||
// TestResponsesHistorySplitUploadsHistoryAndKeepsLatestPrompt exercises the
// same history-split flow as the chat/completions test but through the
// /v1/responses route (via the registered router), asserting the single
// upload and the latest-turn-plus-reasoning shape of the completion prompt.
func TestResponsesHistorySplitUploadsHistoryAndKeepsLatestPrompt(t *testing.T) {
	ds := &inlineUploadDSStub{}
	h := &Handler{
		Store: mockOpenAIConfig{
			wideInput:           true,
			historySplitEnabled: true,
			historySplitTurns:   1,
		},
		Auth: streamStatusAuthStub{},
		DS:   ds,
	}
	// Route through the real router so the /v1/responses wiring is covered.
	r := chi.NewRouter()
	RegisterRoutes(r, h)
	reqBody, _ := json.Marshal(map[string]any{
		"model":    "deepseek-chat",
		"messages": historySplitTestMessages(),
		"stream":   false,
	})
	req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(string(reqBody)))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()

	r.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	if len(ds.uploadCalls) != 1 {
		t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls))
	}
	if ds.completionReq == nil {
		t.Fatal("expected completion payload to be captured")
	}
	promptText, _ := ds.completionReq["prompt"].(string)
	if !strings.Contains(promptText, "latest user turn") {
		t.Fatalf("expected latest turn in completion prompt, got %s", promptText)
	}
	if strings.Contains(promptText, "first user turn") {
		t.Fatalf("expected historical turns removed from completion prompt, got %s", promptText)
	}
	if !strings.Contains(promptText, "[reasoning_content]") || !strings.Contains(promptText, "hidden reasoning") {
		t.Fatalf("expected latest assistant reasoning to be attached to completion prompt, got %s", promptText)
	}
	if !strings.Contains(promptText, "Follow the instructions in this prompt first") {
		t.Fatalf("expected stronger prompt override in completion prompt, got %s", promptText)
	}
	if strings.Index(promptText, "Follow the instructions in this prompt first") > strings.Index(promptText, "Continue the conversation") {
		t.Fatalf("expected history split instruction before continuity instructions, got %s", promptText)
	}
}
|
||||
|
||||
// TestChatCompletionsHistorySplitUploadFailureReturnsInternalServerError
// verifies that when the HISTORY.txt upload fails, the handler responds 500
// and never forwards a completion request downstream.
func TestChatCompletionsHistorySplitUploadFailureReturnsInternalServerError(t *testing.T) {
	// Seed the stub so UploadFile fails with a deterministic error.
	ds := &inlineUploadDSStub{uploadErr: context.DeadlineExceeded}
	h := &Handler{
		Store: mockOpenAIConfig{
			wideInput:           true,
			historySplitEnabled: true,
			historySplitTurns:   1,
		},
		Auth: streamStatusAuthStub{},
		DS:   ds,
	}
	reqBody, _ := json.Marshal(map[string]any{
		"model":    "deepseek-chat",
		"messages": historySplitTestMessages(),
		"stream":   false,
	})
	req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", strings.NewReader(string(reqBody)))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()

	h.ChatCompletions(rec, req)

	if rec.Code != http.StatusInternalServerError {
		t.Fatalf("expected 500, got %d body=%s", rec.Code, rec.Body.String())
	}
	// Upload failure must short-circuit before any completion call.
	if ds.completionReq != nil {
		t.Fatalf("did not expect completion payload on upload failure")
	}
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"ds2api/internal/deepseek"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
// buildOpenAIFinalPrompt builds the final DeepSeek prompt from OpenAI-style
// messages and tools using the default tool-choice policy. It returns the
// rendered prompt and the declared tool names.
func buildOpenAIFinalPrompt(messagesRaw []any, toolsRaw any, traceID string, thinkingEnabled bool) (string, []string) {
	return buildOpenAIFinalPromptWithPolicy(messagesRaw, toolsRaw, traceID, util.DefaultToolChoicePolicy(), thinkingEnabled)
}
|
||||
|
||||
func buildOpenAIFinalPromptWithPolicy(messagesRaw []any, toolsRaw any, traceID string, toolPolicy util.ToolChoicePolicy, thinkingEnabled bool) (string, []string) {
|
||||
messages := normalizeOpenAIMessagesForPrompt(messagesRaw, traceID)
|
||||
toolNames := []string{}
|
||||
if tools, ok := toolsRaw.([]any); ok && len(tools) > 0 {
|
||||
messages, toolNames = injectToolPrompt(messages, tools, toolPolicy)
|
||||
}
|
||||
return deepseek.MessagesPrepareWithThinking(messages, thinkingEnabled), toolNames
|
||||
}
|
||||
|
||||
// BuildPromptForAdapter exposes the OpenAI-compatible prompt building flow so
// other protocol adapters (for example Gemini) can reuse the same tool/history
// normalization logic and remain behavior-compatible with chat/completions.
// It delegates to buildOpenAIFinalPrompt, which applies the default
// tool-choice policy, and returns the rendered prompt plus the declared tool
// names.
func BuildPromptForAdapter(messagesRaw []any, toolsRaw any, traceID string, thinkingEnabled bool) (string, []string) {
	return buildOpenAIFinalPrompt(messagesRaw, toolsRaw, traceID, thinkingEnabled)
}
|
||||
@@ -1,103 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestBuildOpenAIFinalPrompt_HandlerPathIncludesToolRoundtripSemantics
// verifies that a full tool roundtrip (user -> assistant tool call -> tool
// result) survives prompt building: the tool name is collected, and the
// rendered prompt keeps both the assistant's <tool_calls> history and the
// tool's JSON output.
func TestBuildOpenAIFinalPrompt_HandlerPathIncludesToolRoundtripSemantics(t *testing.T) {
	messages := []any{
		map[string]any{"role": "user", "content": "查北京天气"},
		map[string]any{
			"role": "assistant",
			"tool_calls": []any{
				map[string]any{
					"id": "call_1",
					"function": map[string]any{
						"name":      "get_weather",
						"arguments": "{\"city\":\"beijing\"}",
					},
				},
			},
		},
		map[string]any{
			"role":         "tool",
			"tool_call_id": "call_1",
			"name":         "get_weather",
			// Structured (non-string) tool output must be serialized into the prompt.
			"content": map[string]any{"temp": 18, "condition": "sunny"},
		},
	}
	tools := []any{
		map[string]any{
			"type": "function",
			"function": map[string]any{
				"name":        "get_weather",
				"description": "Get weather",
				"parameters": map[string]any{
					"type": "object",
				},
			},
		},
	}

	finalPrompt, toolNames := buildOpenAIFinalPrompt(messages, tools, "", false)
	if len(toolNames) != 1 || toolNames[0] != "get_weather" {
		t.Fatalf("unexpected tool names: %#v", toolNames)
	}
	if !strings.Contains(finalPrompt, `"condition":"sunny"`) {
		t.Fatalf("handler finalPrompt should preserve tool output content: %q", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "<tool_calls>") {
		t.Fatalf("handler finalPrompt should preserve assistant tool history: %q", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "<tool_name>get_weather</tool_name>") {
		t.Fatalf("handler finalPrompt should include tool name history: %q", finalPrompt)
	}
}
|
||||
|
||||
// TestBuildOpenAIFinalPrompt_VercelPreparePathKeepsFinalAnswerInstruction
// verifies the tool-instruction boilerplate injected into the prompt: the
// XML <tool_calls> anchor instruction, the format section, the no-markdown-
// fence rule, and the absence of any fenced-JSON tool-call requirement.
func TestBuildOpenAIFinalPrompt_VercelPreparePathKeepsFinalAnswerInstruction(t *testing.T) {
	messages := []any{
		map[string]any{"role": "system", "content": "You are helpful"},
		map[string]any{"role": "user", "content": "请调用工具"},
	}
	tools := []any{
		map[string]any{
			"type": "function",
			"function": map[string]any{
				"name":        "search",
				"description": "search docs",
				"parameters": map[string]any{
					"type": "object",
				},
			},
		},
	}

	finalPrompt, _ := buildOpenAIFinalPrompt(messages, tools, "", false)
	if !strings.Contains(finalPrompt, "Remember: The ONLY valid way to use tools is the <tool_calls> XML block at the end of your response.") {
		t.Fatalf("vercel prepare finalPrompt missing final tool-call anchor instruction: %q", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "TOOL CALL FORMAT") {
		t.Fatalf("vercel prepare finalPrompt missing xml format instruction: %q", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "Do NOT wrap XML in markdown fences") {
		t.Fatalf("vercel prepare finalPrompt missing no-fence xml instruction: %q", finalPrompt)
	}
	// Fenced JSON tool calls are explicitly NOT part of the contract.
	if strings.Contains(finalPrompt, "```json") {
		t.Fatalf("vercel prepare finalPrompt should not require fenced tool calls: %q", finalPrompt)
	}
}
|
||||
|
||||
// TestBuildOpenAIFinalPromptWithThinkingAddsContinuationContract verifies
// that enabling thinking mode appends the continuation contract (continue
// from prior context; user-facing answer stays out of reasoning) to the
// rendered prompt.
func TestBuildOpenAIFinalPromptWithThinkingAddsContinuationContract(t *testing.T) {
	messages := []any{
		map[string]any{"role": "user", "content": "继续回答上一个问题"},
	}

	// thinkingEnabled=true is the condition under test.
	finalPrompt, _ := buildOpenAIFinalPrompt(messages, nil, "", true)
	if !strings.Contains(finalPrompt, "Continue the conversation from the full prior context") {
		t.Fatalf("expected continuation contract in thinking prompt, got=%q", finalPrompt)
	}
	if !strings.Contains(finalPrompt, "final user-facing answer only in reasoning") {
		t.Fatalf("expected visible-answer contract in thinking prompt, got=%q", finalPrompt)
	}
}
|
||||
@@ -1,210 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"ds2api/internal/config"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
// newEmptyStoreForNormalizeTest loads a config.Store from an empty JSON
// config (via the DS2API_CONFIG_JSON env override) so normalization tests
// run against pure defaults.
func newEmptyStoreForNormalizeTest(t *testing.T) *config.Store {
	t.Helper()
	t.Setenv("DS2API_CONFIG_JSON", `{}`)
	return config.LoadStore()
}
|
||||
|
||||
// TestNormalizeOpenAIChatRequest verifies basic chat-request normalization:
// the gpt-5-codex alias resolves to deepseek-reasoner, the stream flag is
// honored, unknown fields like temperature pass through, and a final prompt
// is produced.
func TestNormalizeOpenAIChatRequest(t *testing.T) {
	store := newEmptyStoreForNormalizeTest(t)
	req := map[string]any{
		"model": "gpt-5-codex",
		"messages": []any{
			map[string]any{"role": "user", "content": "hello"},
		},
		"temperature": 0.3,
		"stream":      true,
	}
	n, err := normalizeOpenAIChatRequest(store, req, "")
	if err != nil {
		t.Fatalf("normalize failed: %v", err)
	}
	if n.ResolvedModel != "deepseek-reasoner" {
		t.Fatalf("unexpected resolved model: %s", n.ResolvedModel)
	}
	if !n.Stream {
		t.Fatalf("expected stream=true")
	}
	if _, ok := n.PassThrough["temperature"]; !ok {
		t.Fatalf("expected temperature passthrough")
	}
	if n.FinalPrompt == "" {
		t.Fatalf("expected non-empty final prompt")
	}
}
|
||||
|
||||
// TestNormalizeOpenAIChatRequestCollectsRefFileIDs verifies that file ids
// are gathered from all three sources — top-level ref_file_ids, attachments,
// and input_file content parts — deduplicated, and kept in source-priority
// order (top-level first, then attachments, then message parts).
func TestNormalizeOpenAIChatRequestCollectsRefFileIDs(t *testing.T) {
	store := newEmptyStoreForNormalizeTest(t)
	req := map[string]any{
		"model": "gpt-5-codex",
		"messages": []any{
			map[string]any{
				"role": "user",
				"content": []any{
					map[string]any{"type": "input_text", "text": "hello"},
					map[string]any{"type": "input_file", "file_id": "file-msg"},
				},
			},
		},
		"attachments": []any{
			map[string]any{"file_id": "file-attachment"},
		},
		// "file-attachment" appears twice on purpose to exercise dedup.
		"ref_file_ids": []any{"file-top", "file-attachment"},
	}
	n, err := normalizeOpenAIChatRequest(store, req, "")
	if err != nil {
		t.Fatalf("normalize failed: %v", err)
	}
	if len(n.RefFileIDs) != 3 {
		t.Fatalf("expected 3 distinct file ids, got %#v", n.RefFileIDs)
	}
	if n.RefFileIDs[0] != "file-top" || n.RefFileIDs[1] != "file-attachment" || n.RefFileIDs[2] != "file-msg" {
		t.Fatalf("unexpected file ids: %#v", n.RefFileIDs)
	}
}
|
||||
|
||||
// TestNormalizeOpenAIResponsesRequestInput verifies that a Responses-API
// request with a string "input" and separate "instructions" normalizes to
// two messages (system + user) and that gpt-4o resolves to deepseek-chat.
func TestNormalizeOpenAIResponsesRequestInput(t *testing.T) {
	store := newEmptyStoreForNormalizeTest(t)
	req := map[string]any{
		"model":        "gpt-4o",
		"input":        "ping",
		"instructions": "system",
	}
	n, err := normalizeOpenAIResponsesRequest(store, req, "")
	if err != nil {
		t.Fatalf("normalize failed: %v", err)
	}
	if n.ResolvedModel != "deepseek-chat" {
		t.Fatalf("unexpected resolved model: %s", n.ResolvedModel)
	}
	if len(n.Messages) != 2 {
		t.Fatalf("expected 2 normalized messages, got %d", len(n.Messages))
	}
}
|
||||
|
||||
// TestNormalizeOpenAIResponsesRequestToolChoiceRequired verifies that
// tool_choice="required" normalizes to ToolChoiceRequired mode and keeps
// the declared tool names intact.
func TestNormalizeOpenAIResponsesRequestToolChoiceRequired(t *testing.T) {
	store := newEmptyStoreForNormalizeTest(t)
	req := map[string]any{
		"model": "gpt-4o",
		"input": "ping",
		"tools": []any{
			map[string]any{
				"type": "function",
				"function": map[string]any{
					"name": "search",
					"parameters": map[string]any{
						"type": "object",
					},
				},
			},
		},
		"tool_choice": "required",
	}
	n, err := normalizeOpenAIResponsesRequest(store, req, "")
	if err != nil {
		t.Fatalf("normalize failed: %v", err)
	}
	if n.ToolChoice.Mode != util.ToolChoiceRequired {
		t.Fatalf("expected tool choice mode required, got %q", n.ToolChoice.Mode)
	}
	if len(n.ToolNames) != 1 || n.ToolNames[0] != "search" {
		t.Fatalf("unexpected tool names: %#v", n.ToolNames)
	}
}
|
||||
|
||||
// TestNormalizeOpenAIResponsesRequestToolChoiceForcedFunction verifies that
// forcing a specific function via tool_choice yields ToolChoiceForced mode,
// records the forced name, and filters the declared tool list down to just
// that function.
func TestNormalizeOpenAIResponsesRequestToolChoiceForcedFunction(t *testing.T) {
	store := newEmptyStoreForNormalizeTest(t)
	req := map[string]any{
		"model": "gpt-4o",
		"input": "ping",
		// Two declared tools; only the forced one should survive filtering.
		"tools": []any{
			map[string]any{
				"type": "function",
				"function": map[string]any{
					"name": "search",
				},
			},
			map[string]any{
				"type": "function",
				"function": map[string]any{
					"name": "read_file",
				},
			},
		},
		"tool_choice": map[string]any{
			"type": "function",
			"name": "read_file",
		},
	}
	n, err := normalizeOpenAIResponsesRequest(store, req, "")
	if err != nil {
		t.Fatalf("normalize failed: %v", err)
	}
	if n.ToolChoice.Mode != util.ToolChoiceForced {
		t.Fatalf("expected tool choice mode forced, got %q", n.ToolChoice.Mode)
	}
	if n.ToolChoice.ForcedName != "read_file" {
		t.Fatalf("expected forced tool name read_file, got %q", n.ToolChoice.ForcedName)
	}
	if len(n.ToolNames) != 1 || n.ToolNames[0] != "read_file" {
		t.Fatalf("expected filtered tool names [read_file], got %#v", n.ToolNames)
	}
}
|
||||
|
||||
// TestNormalizeOpenAIResponsesRequestToolChoiceForcedUndeclaredFails
// verifies that forcing a function that is not among the declared tools is
// rejected with an error.
func TestNormalizeOpenAIResponsesRequestToolChoiceForcedUndeclaredFails(t *testing.T) {
	store := newEmptyStoreForNormalizeTest(t)
	req := map[string]any{
		"model": "gpt-4o",
		"input": "ping",
		"tools": []any{
			map[string]any{
				"type": "function",
				"function": map[string]any{
					"name": "search",
				},
			},
		},
		// "read_file" is deliberately not declared in tools above.
		"tool_choice": map[string]any{
			"type": "function",
			"name": "read_file",
		},
	}
	if _, err := normalizeOpenAIResponsesRequest(store, req, ""); err == nil {
		t.Fatalf("expected forced undeclared tool to fail")
	}
}
|
||||
|
||||
// TestNormalizeOpenAIResponsesRequestToolChoiceNoneKeepsToolDetectionEnabled
// verifies that tool_choice="none" sets ToolChoiceNone mode but still leaves
// a non-empty tool-name list so tool-call detection in the output remains
// active.
func TestNormalizeOpenAIResponsesRequestToolChoiceNoneKeepsToolDetectionEnabled(t *testing.T) {
	store := newEmptyStoreForNormalizeTest(t)
	req := map[string]any{
		"model": "gpt-4o",
		"input": "ping",
		"tools": []any{
			map[string]any{
				"type": "function",
				"function": map[string]any{
					"name": "search",
				},
			},
		},
		"tool_choice": "none",
	}
	n, err := normalizeOpenAIResponsesRequest(store, req, "")
	if err != nil {
		t.Fatalf("normalize failed: %v", err)
	}
	if n.ToolChoice.Mode != util.ToolChoiceNone {
		t.Fatalf("expected tool choice mode none, got %q", n.ToolChoice.Mode)
	}
	// Detection must stay on even though the model is told not to call tools.
	if len(n.ToolNames) == 0 {
		t.Fatalf("expected tool detection sentinel when tool_choice=none, got %#v", n.ToolNames)
	}
}
|
||||
@@ -1,334 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
chimw "github.com/go-chi/chi/v5/middleware"
|
||||
|
||||
"ds2api/internal/auth"
|
||||
"ds2api/internal/deepseek"
|
||||
)
|
||||
|
||||
// streamStatusAuthStub is a test double for the handler's auth dependency.
// Determine and DetermineCaller both return the same fixed direct-token
// identity; Release is a no-op.
type streamStatusAuthStub struct{}

// Determine returns a canned RequestAuth carrying a direct DeepSeek token.
func (streamStatusAuthStub) Determine(_ *http.Request) (*auth.RequestAuth, error) {
	return &auth.RequestAuth{
		UseConfigToken: false,
		DeepSeekToken:  "direct-token",
		CallerID:       "caller:test",
		TriedAccounts:  map[string]bool{},
	}, nil
}

// DetermineCaller mirrors Determine for caller-scoped auth lookups.
func (streamStatusAuthStub) DetermineCaller(_ *http.Request) (*auth.RequestAuth, error) {
	return &auth.RequestAuth{
		UseConfigToken: false,
		DeepSeekToken:  "direct-token",
		CallerID:       "caller:test",
		TriedAccounts:  map[string]bool{},
	}, nil
}

// Release is a no-op; the stub holds no per-request state.
func (streamStatusAuthStub) Release(_ *auth.RequestAuth) {}
|
||||
|
||||
// streamStatusDSStub is a DeepSeek client stub whose CallCompletion hands back
// a pre-built HTTP response and whose other methods succeed trivially.
type streamStatusDSStub struct {
	resp *http.Response // canned upstream response served to the handler under test
}

// CreateSession returns a fixed session id.
func (m streamStatusDSStub) CreateSession(_ context.Context, _ *auth.RequestAuth, _ int) (string, error) {
	return "session-id", nil
}

// GetPow returns a fixed proof-of-work answer.
func (m streamStatusDSStub) GetPow(_ context.Context, _ *auth.RequestAuth, _ int) (string, error) {
	return "pow", nil
}

// UploadFile reports a single successfully uploaded file.
func (m streamStatusDSStub) UploadFile(_ context.Context, _ *auth.RequestAuth, _ deepseek.UploadFileRequest, _ int) (*deepseek.UploadFileResult, error) {
	return &deepseek.UploadFileResult{ID: "file-id", Filename: "file.txt", Bytes: 1, Status: "uploaded"}, nil
}

// CallCompletion returns the canned response configured on the stub.
func (m streamStatusDSStub) CallCompletion(_ context.Context, _ *auth.RequestAuth, _ map[string]any, _ string, _ int) (*http.Response, error) {
	return m.resp, nil
}

// DeleteSessionForToken always reports success.
func (m streamStatusDSStub) DeleteSessionForToken(_ context.Context, _ string, _ string) (*deepseek.DeleteSessionResult, error) {
	return &deepseek.DeleteSessionResult{Success: true}, nil
}

// DeleteAllSessionsForToken always succeeds.
func (m streamStatusDSStub) DeleteAllSessionsForToken(_ context.Context, _ string) error {
	return nil
}
|
||||
|
||||
func makeOpenAISSEHTTPResponse(lines ...string) *http.Response {
|
||||
body := strings.Join(lines, "\n")
|
||||
if !strings.HasSuffix(body, "\n") {
|
||||
body += "\n"
|
||||
}
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader(body)),
|
||||
}
|
||||
}
|
||||
|
||||
// captureStatusMiddleware returns chi middleware that records the HTTP status
// written for each request into *statuses, using chi's WrapResponseWriter so
// the status of streamed responses (implicit 200) is observable too.
func captureStatusMiddleware(statuses *[]int) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ww := chimw.NewWrapResponseWriter(w, r.ProtoMajor)
			next.ServeHTTP(ww, r)
			// Append after the handler completes so the final status is captured.
			*statuses = append(*statuses, ww.Status())
		})
	}
}
|
||||
|
||||
// TestChatCompletionsStreamStatusCapturedAs200 checks that a streamed
// /v1/chat/completions request reports status 200 through the status-capture
// middleware (not a zero/unset status).
func TestChatCompletionsStreamStatusCapturedAs200(t *testing.T) {
	statuses := make([]int, 0, 1)
	h := &Handler{
		Store: mockOpenAIConfig{wideInput: true},
		Auth:  streamStatusAuthStub{},
		DS:    streamStatusDSStub{resp: makeOpenAISSEHTTPResponse(`data: {"p":"response/content","v":"hello"}`, "data: [DONE]")},
	}
	r := chi.NewRouter()
	r.Use(captureStatusMiddleware(&statuses))
	RegisterRoutes(r, h)

	reqBody := `{"model":"deepseek-chat","messages":[{"role":"user","content":"hi"}],"stream":true}`
	req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", strings.NewReader(reqBody))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	if len(statuses) != 1 {
		t.Fatalf("expected one captured status, got %d", len(statuses))
	}
	if statuses[0] != http.StatusOK {
		t.Fatalf("expected captured status 200 (not 000), got %d", statuses[0])
	}
}

// TestResponsesStreamStatusCapturedAs200 is the /v1/responses analogue of the
// chat-completions status-capture test above.
func TestResponsesStreamStatusCapturedAs200(t *testing.T) {
	statuses := make([]int, 0, 1)
	h := &Handler{
		Store: mockOpenAIConfig{wideInput: true},
		Auth:  streamStatusAuthStub{},
		DS:    streamStatusDSStub{resp: makeOpenAISSEHTTPResponse(`data: {"p":"response/content","v":"hello"}`, "data: [DONE]")},
	}
	r := chi.NewRouter()
	r.Use(captureStatusMiddleware(&statuses))
	RegisterRoutes(r, h)

	reqBody := `{"model":"deepseek-chat","input":"hi","stream":true}`
	req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(reqBody))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	if len(statuses) != 1 {
		t.Fatalf("expected one captured status, got %d", len(statuses))
	}
	if statuses[0] != http.StatusOK {
		t.Fatalf("expected captured status 200 (not 000), got %d", statuses[0])
	}
}
|
||||
|
||||
// TestChatCompletionsStreamContentFilterStopsNormallyWithoutLeak checks that an
// upstream CONTENT_FILTER stop yields a clean 200 stream with finish_reason
// "stop" and that the filter's canned refusal text never reaches the client.
func TestChatCompletionsStreamContentFilterStopsNormallyWithoutLeak(t *testing.T) {
	statuses := make([]int, 0, 1)
	h := &Handler{
		Store: mockOpenAIConfig{wideInput: true},
		Auth:  streamStatusAuthStub{},
		DS: streamStatusDSStub{resp: makeOpenAISSEHTTPResponse(
			`data: {"p":"response/content","v":"合法前缀"}`,
			`data: {"p":"response/status","v":"CONTENT_FILTER","accumulated_token_usage":77}`,
			`data: {"p":"response/content","v":"CONTENT_FILTER你好,这个问题我暂时无法回答,让我们换个话题再聊聊吧。"}`,
		)},
	}
	r := chi.NewRouter()
	r.Use(captureStatusMiddleware(&statuses))
	RegisterRoutes(r, h)

	reqBody := `{"model":"deepseek-chat","messages":[{"role":"user","content":"hi"}],"stream":true}`
	req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", strings.NewReader(reqBody))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	if len(statuses) != 1 || statuses[0] != http.StatusOK {
		t.Fatalf("expected captured status 200, got %#v", statuses)
	}
	// The refusal suffix that follows the CONTENT_FILTER marker must be hidden.
	if strings.Contains(rec.Body.String(), "这个问题我暂时无法回答") {
		t.Fatalf("expected leaked content-filter suffix to be hidden, body=%s", rec.Body.String())
	}

	frames, done := parseSSEDataFrames(t, rec.Body.String())
	if !done {
		t.Fatalf("expected [DONE], body=%s", rec.Body.String())
	}
	if len(frames) == 0 {
		t.Fatalf("expected at least one json frame, body=%s", rec.Body.String())
	}
	last := frames[len(frames)-1]
	choices, _ := last["choices"].([]any)
	if len(choices) != 1 {
		t.Fatalf("expected one choice in final frame, got %#v", last)
	}
	choice, _ := choices[0].(map[string]any)
	if choice["finish_reason"] != "stop" {
		t.Fatalf("expected finish_reason=stop for content-filter upstream stop, got %#v", choice["finish_reason"])
	}
}
|
||||
|
||||
// TestChatCompletionsStreamEmitsFailureFrameWhenUpstreamOutputEmpty checks
// that when upstream produces no content at all, the stream still terminates
// with [DONE] and carries exactly one failure frame with status_code 429 and
// error code "upstream_empty_output".
func TestChatCompletionsStreamEmitsFailureFrameWhenUpstreamOutputEmpty(t *testing.T) {
	statuses := make([]int, 0, 1)
	h := &Handler{
		Store: mockOpenAIConfig{wideInput: true},
		Auth:  streamStatusAuthStub{},
		DS:    streamStatusDSStub{resp: makeOpenAISSEHTTPResponse("data: [DONE]")},
	}
	r := chi.NewRouter()
	r.Use(captureStatusMiddleware(&statuses))
	RegisterRoutes(r, h)

	reqBody := `{"model":"deepseek-chat","messages":[{"role":"user","content":"hi"}],"stream":true}`
	req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", strings.NewReader(reqBody))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	if len(statuses) != 1 || statuses[0] != http.StatusOK {
		t.Fatalf("expected captured status 200, got %#v", statuses)
	}

	frames, done := parseSSEDataFrames(t, rec.Body.String())
	if !done {
		t.Fatalf("expected [DONE], body=%s", rec.Body.String())
	}
	if len(frames) != 1 {
		t.Fatalf("expected one failure frame, got %#v body=%s", frames, rec.Body.String())
	}
	last := frames[0]
	// JSON numbers decode as float64; convert before comparing.
	statusCode, ok := last["status_code"].(float64)
	if !ok || int(statusCode) != http.StatusTooManyRequests {
		t.Fatalf("expected status_code=429, got %#v body=%s", last["status_code"], rec.Body.String())
	}
	errObj, _ := last["error"].(map[string]any)
	if asString(errObj["code"]) != "upstream_empty_output" {
		t.Fatalf("expected code=upstream_empty_output, got %#v", last)
	}
}
|
||||
|
||||
// TestResponsesStreamUsageIgnoresBatchAccumulatedTokenUsage checks that an
// upstream BATCH frame carrying accumulated_token_usage does not get copied
// into the final response's usage.output_tokens.
func TestResponsesStreamUsageIgnoresBatchAccumulatedTokenUsage(t *testing.T) {
	statuses := make([]int, 0, 1)
	h := &Handler{
		Store: mockOpenAIConfig{wideInput: true},
		Auth:  streamStatusAuthStub{},
		DS: streamStatusDSStub{resp: makeOpenAISSEHTTPResponse(
			`data: {"p":"response/content","v":"hello"}`,
			`data: {"p":"response","o":"BATCH","v":[{"p":"accumulated_token_usage","v":190},{"p":"quasi_status","v":"FINISHED"}]}`,
		)},
	}
	r := chi.NewRouter()
	r.Use(captureStatusMiddleware(&statuses))
	RegisterRoutes(r, h)

	reqBody := `{"model":"deepseek-chat","input":"hi","stream":true}`
	req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(reqBody))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	if len(statuses) != 1 || statuses[0] != http.StatusOK {
		t.Fatalf("expected captured status 200, got %#v", statuses)
	}
	frames, done := parseSSEDataFrames(t, rec.Body.String())
	if !done {
		t.Fatalf("expected [DONE], body=%s", rec.Body.String())
	}
	if len(frames) == 0 {
		t.Fatalf("expected at least one json frame, body=%s", rec.Body.String())
	}
	last := frames[len(frames)-1]
	resp, _ := last["response"].(map[string]any)
	if resp == nil {
		t.Fatalf("expected response payload in final frame, got %#v", last)
	}
	usage, _ := resp["usage"].(map[string]any)
	if usage == nil {
		t.Fatalf("expected usage in response payload, got %#v", resp)
	}
	// 190 is the upstream accumulated figure; seeing it here would mean it leaked through.
	if got, _ := usage["output_tokens"].(float64); int(got) == 190 {
		t.Fatalf("expected upstream accumulated token usage to be ignored, got %#v", usage["output_tokens"])
	}
}
|
||||
|
||||
// TestResponsesNonStreamUsageIgnoresPromptAndOutputTokenUsage checks that for
// non-streaming /v1/responses, upstream token_usage figures are not copied
// into the response and that total_tokens stays input+output consistent.
func TestResponsesNonStreamUsageIgnoresPromptAndOutputTokenUsage(t *testing.T) {
	statuses := make([]int, 0, 1)
	h := &Handler{
		Store: mockOpenAIConfig{wideInput: true},
		Auth:  streamStatusAuthStub{},
		DS: streamStatusDSStub{resp: makeOpenAISSEHTTPResponse(
			`data: {"p":"response/content","v":"ok"}`,
			`data: {"p":"response","o":"BATCH","v":[{"p":"token_usage","v":{"prompt_tokens":11,"completion_tokens":29}},{"p":"quasi_status","v":"FINISHED"}]}`,
		)},
	}
	r := chi.NewRouter()
	r.Use(captureStatusMiddleware(&statuses))
	RegisterRoutes(r, h)

	reqBody := `{"model":"deepseek-chat","input":"hi","stream":false}`
	req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(reqBody))
	req.Header.Set("Authorization", "Bearer direct-token")
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d body=%s", rec.Code, rec.Body.String())
	}
	if len(statuses) != 1 || statuses[0] != http.StatusOK {
		t.Fatalf("expected captured status 200, got %#v", statuses)
	}
	var out map[string]any
	if err := json.Unmarshal(rec.Body.Bytes(), &out); err != nil {
		t.Fatalf("decode response failed: %v body=%s", err, rec.Body.String())
	}
	usage, _ := out["usage"].(map[string]any)
	if usage == nil {
		t.Fatalf("expected usage object, got %#v", out)
	}
	input, _ := usage["input_tokens"].(float64)
	output, _ := usage["output_tokens"].(float64)
	total, _ := usage["total_tokens"].(float64)
	// 29 is the upstream completion_tokens figure; it must not leak through.
	if int(output) == 29 {
		t.Fatalf("expected upstream completion token usage to be ignored, got %#v", usage["output_tokens"])
	}
	if int(total) != int(input)+int(output) {
		t.Fatalf("expected total_tokens=input_tokens+output_tokens, usage=%#v", usage)
	}
}
|
||||
@@ -1,157 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"ds2api/internal/toolcall"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// toolStreamSieveState is the per-stream state of the tool-call sieve that
// separates plain text from tool-call markup in streamed model output.
type toolStreamSieveState struct {
	pending   strings.Builder // text held back pending a decision (see processToolSieveChunk — not in this file view)
	capture   strings.Builder // text captured while a suspected tool block is being buffered
	capturing bool            // whether capture is currently accumulating

	// Markdown code-fence tracking, advanced by updateCodeFenceState.
	codeFenceStack        []int // tick counts of currently open fences (innermost last)
	codeFencePendingTicks int   // trailing backtick run carried across the chunk boundary
	codeFenceLineStart    bool  // whether the next character starts a new line

	pendingToolRaw   string                    // raw captured tool-block text (NOTE(review): usage is outside this view — confirm)
	pendingToolCalls []toolcall.ParsedToolCall // parsed calls awaiting emission (NOTE(review): confirm against emitter)

	// Incremental per-tool-call delta bookkeeping, cleared by resetIncrementalToolState.
	disableDeltas  bool
	toolNameSent   bool
	toolName       string
	toolArgsStart  int
	toolArgsSent   int
	toolArgsString bool
	toolArgsDone   bool
}

// toolStreamEvent is one output of the sieve: released plain text and/or
// completed tool calls and/or incremental tool-call deltas.
type toolStreamEvent struct {
	Content        string
	ToolCalls      []toolcall.ParsedToolCall
	ToolCallDeltas []toolCallDelta
}

// toolCallDelta is an incremental fragment of a streamed tool call.
type toolCallDelta struct {
	Index     int    // tool call index within the response
	Name      string // tool name fragment, if any
	Arguments string // argument JSON fragment, if any
}
|
||||
|
||||
func (s *toolStreamSieveState) resetIncrementalToolState() {
|
||||
s.disableDeltas = false
|
||||
s.toolNameSent = false
|
||||
s.toolName = ""
|
||||
s.toolArgsStart = -1
|
||||
s.toolArgsSent = -1
|
||||
s.toolArgsString = false
|
||||
s.toolArgsDone = false
|
||||
}
|
||||
|
||||
// noteText feeds emitted text into the sieve's code-fence tracker so later
// chunks can tell whether they fall inside a fenced example. Whitespace-only
// text is ignored.
func (s *toolStreamSieveState) noteText(content string) {
	if !hasMeaningfulText(content) {
		return
	}
	updateCodeFenceState(s, content)
}
|
||||
|
||||
// hasMeaningfulText reports whether text contains anything other than whitespace.
func hasMeaningfulText(text string) bool {
	return len(strings.TrimSpace(text)) > 0
}
|
||||
|
||||
// insideCodeFenceWithState reports whether appending text to the given sieve
// state would leave the stream inside an open Markdown code fence. The state
// is only read, never mutated (the simulation copies the stack). A nil state
// falls back to the stateless insideCodeFence check.
func insideCodeFenceWithState(state *toolStreamSieveState, text string) bool {
	if state == nil {
		return insideCodeFence(text)
	}
	simulated := simulateCodeFenceState(
		state.codeFenceStack,
		state.codeFencePendingTicks,
		state.codeFenceLineStart,
		text,
	)
	return len(simulated.stack) > 0
}
|
||||
|
||||
// insideCodeFence reports whether text, scanned in isolation starting at a
// line start with no open fences, ends inside an open Markdown code fence.
func insideCodeFence(text string) bool {
	if text == "" {
		return false
	}
	return len(simulateCodeFenceState(nil, 0, true, text).stack) > 0
}
|
||||
|
||||
// updateCodeFenceState advances the sieve's persistent code-fence tracking by
// running the fence simulation over text and committing the resulting state.
// Nil states and chunks with no meaningful text are skipped.
func updateCodeFenceState(state *toolStreamSieveState, text string) {
	if state == nil || !hasMeaningfulText(text) {
		return
	}
	next := simulateCodeFenceState(
		state.codeFenceStack,
		state.codeFencePendingTicks,
		state.codeFenceLineStart,
		text,
	)
	state.codeFenceStack = next.stack
	state.codeFencePendingTicks = next.pendingTicks
	state.codeFenceLineStart = next.lineStart
}
|
||||
|
||||
// codeFenceSimulation is the result of running the code-fence scanner over a
// chunk: the open-fence stack plus the carry-over scanner position state.
type codeFenceSimulation struct {
	stack        []int // tick counts of fences still open (innermost last)
	pendingTicks int   // trailing backtick run that may continue in the next chunk
	lineStart    bool  // whether the scanner ended at the start of a line
}
|
||||
|
||||
// simulateCodeFenceState scans text starting from the supplied fence stack,
// pending backtick run, and line-start flag, and returns the resulting state.
// Inputs are not mutated (the stack is copied), so callers can use this both
// to preview (insideCodeFenceWithState) and to commit (updateCodeFenceState)
// fence transitions.
func simulateCodeFenceState(stack []int, pendingTicks int, lineStart bool, text string) codeFenceSimulation {
	chunk := text
	nextStack := append([]int(nil), stack...)
	ticks := pendingTicks
	atLineStart := lineStart

	// flushTicks resolves a finished backtick run: a run of >=3 ticks at the
	// start of a line opens/closes a fence (via applyFenceMarker); any other
	// run is inline code and is ignored.
	flushTicks := func() {
		if ticks > 0 {
			if atLineStart && ticks >= 3 {
				applyFenceMarker(&nextStack, ticks)
			}
			atLineStart = false
			ticks = 0
		}
	}

	for i := 0; i < len(chunk); i++ {
		ch := chunk[i]
		if ch == '`' {
			// Keep accumulating — the run may continue into the next byte/chunk.
			ticks++
			continue
		}
		flushTicks()
		switch ch {
		case '\n', '\r':
			atLineStart = true
		case ' ', '\t':
			// Leading whitespace preserves the line-start property
			// (so indented fences are still recognized).
			if atLineStart {
				continue
			}
			atLineStart = false
		default:
			atLineStart = false
		}
	}

	// A trailing backtick run is deliberately NOT flushed here: it is returned
	// as pendingTicks so a fence marker split across chunk boundaries is still
	// recognized on the next call.
	return codeFenceSimulation{
		stack:        nextStack,
		pendingTicks: ticks,
		lineStart:    atLineStart,
	}
}
|
||||
|
||||
// applyFenceMarker updates the open-fence stack for a backtick run of length
// ticks seen at a line start. On an empty stack the run opens a fence; a run
// at least as long as the innermost open fence closes it; a shorter run opens
// a nested fence. Nil stacks and non-positive runs are ignored.
func applyFenceMarker(stack *[]int, ticks int) {
	switch {
	case stack == nil || ticks <= 0:
		return
	case len(*stack) > 0 && ticks >= (*stack)[len(*stack)-1]:
		// Closing marker: pop the innermost fence.
		*stack = (*stack)[:len(*stack)-1]
	default:
		// Opening marker (first fence, or nested run with fewer ticks).
		*stack = append(*stack, ticks)
	}
}
|
||||
@@ -1,120 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"ds2api/internal/toolcall"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// --- XML tool call support for the streaming sieve ---

//nolint:unused // kept as explicit tag inventory for future XML sieve refinements.
var xmlToolCallClosingTags = []string{"</tool_calls>", "</tool_call>", "</invoke>", "</function_call>", "</function_calls>", "</tool_use>",
	// Agent-style XML tags (Roo Code, Cline, etc.)
	"</attempt_completion>", "</ask_followup_question>", "</new_task>", "</result>"}

// xmlToolCallOpeningTags lists every recognized opening-tag prefix; used by
// findPartialXMLToolTagStart to hold back chunk tails that may still grow
// into one of these tags.
var xmlToolCallOpeningTags = []string{"<tool_calls", "<tool_call", "<invoke", "<function_call", "<function_calls", "<tool_use",
	// Agent-style XML tags
	"<attempt_completion", "<ask_followup_question", "<new_task", "<result"}

// xmlToolCallTagPairs maps each opening tag to its expected closing tag.
// Order matters: longer/wrapper tags must be checked first.
// NOTE(review): "<result" appears in the opening/closing inventories above but
// has no pair entry here — confirm whether that is intentional.
var xmlToolCallTagPairs = []struct{ open, close string }{
	{"<tool_calls", "</tool_calls>"},
	{"<tool_call", "</tool_call>"},
	{"<function_calls", "</function_calls>"},
	{"<function_call", "</function_call>"},
	{"<invoke", "</invoke>"},
	{"<tool_use", "</tool_use>"},
	// Agent-style: these are XML "tool call" patterns from coding agents.
	// They get captured → parsed. If parsing fails, the raw XML is preserved
	// so the caller can still see the original text.
	{"<attempt_completion", "</attempt_completion>"},
	{"<ask_followup_question", "</ask_followup_question>"},
	{"<new_task", "</new_task>"},
}

// xmlToolCallBlockPattern matches a complete XML tool call block (wrapper or standalone).
//
//nolint:unused // reserved for future fast-path XML block detection.
var xmlToolCallBlockPattern = regexp.MustCompile(`(?is)(<tool_calls>\s*(?:.*?)\s*</tool_calls>|<tool_call>\s*(?:.*?)\s*</tool_call>|<invoke\b[^>]*>(?:.*?)</invoke>|<function_calls?\b[^>]*>(?:.*?)</function_calls?>|<tool_use>(?:.*?)</tool_use>|<attempt_completion>(?:.*?)</attempt_completion>|<ask_followup_question>(?:.*?)</ask_followup_question>|<new_task>(?:.*?)</new_task>)`)

// xmlToolTagsToDetect is the set of XML tag prefixes used by findToolSegmentStart.
var xmlToolTagsToDetect = []string{"<tool_calls>", "<tool_calls\n", "<tool_call>", "<tool_call\n",
	"<invoke ", "<invoke>", "<function_call", "<function_calls", "<tool_use>",
	// Agent-style tags
	"<attempt_completion>", "<ask_followup_question>", "<new_task>"}
|
||||
|
||||
// consumeXMLToolCapture tries to extract complete XML tool call blocks from captured text.
// It returns the text before the first complete block (prefix), the parsed
// tool calls, the text after the block (suffix), and ready=true once a block
// has been fully consumed. ready=false means either no known opening tag was
// found, or an opening tag is still waiting for its closing tag — in both
// cases the caller must keep buffering.
func consumeXMLToolCapture(captured string, toolNames []string) (prefix string, calls []toolcall.ParsedToolCall, suffix string, ready bool) {
	lower := strings.ToLower(captured)
	// Find the FIRST matching open/close pair, preferring wrapper tags.
	// Tag pairs are ordered longest-first (e.g. <tool_calls before <tool_call)
	// so wrapper tags are checked before inner tags.
	for _, pair := range xmlToolCallTagPairs {
		openIdx := strings.Index(lower, pair.open)
		if openIdx < 0 {
			continue
		}
		// Find the LAST occurrence of the specific closing tag to get the outermost block.
		closeIdx := strings.LastIndex(lower, pair.close)
		if closeIdx < openIdx {
			// Opening tag is present but its specific closing tag hasn't arrived.
			// Return not-ready so we keep buffering — do NOT fall through to
			// try inner pairs (e.g. <tool_call inside <tool_calls).
			return "", nil, "", false
		}
		closeEnd := closeIdx + len(pair.close)

		xmlBlock := captured[openIdx:closeEnd]
		prefixPart := captured[:openIdx]
		suffixPart := captured[closeEnd:]
		parsed := toolcall.ParseToolCalls(xmlBlock, toolNames)
		if len(parsed) > 0 {
			prefixPart, suffixPart = trimWrappingJSONFence(prefixPart, suffixPart)
			return prefixPart, parsed, suffixPart, true
		}
		// If this block failed to become a tool call, pass it through as text.
		return prefixPart + xmlBlock, nil, suffixPart, true
	}
	return "", nil, "", false
}
|
||||
|
||||
// hasOpenXMLToolTag returns true if captured text contains an XML tool opening tag
|
||||
// whose SPECIFIC closing tag has not appeared yet.
|
||||
func hasOpenXMLToolTag(captured string) bool {
|
||||
lower := strings.ToLower(captured)
|
||||
for _, pair := range xmlToolCallTagPairs {
|
||||
if strings.Contains(lower, pair.open) {
|
||||
if !strings.Contains(lower, pair.close) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findPartialXMLToolTagStart checks if the string ends with a partial XML tool tag
|
||||
// (e.g., "<tool_ca" or "<inv") and returns the position of the '<'.
|
||||
func findPartialXMLToolTagStart(s string) int {
|
||||
lastLT := strings.LastIndex(s, "<")
|
||||
if lastLT < 0 {
|
||||
return -1
|
||||
}
|
||||
tail := s[lastLT:]
|
||||
// If there's a '>' in the tail, the tag is closed — not partial.
|
||||
if strings.Contains(tail, ">") {
|
||||
return -1
|
||||
}
|
||||
lowerTail := strings.ToLower(tail)
|
||||
// Check if the tail is a prefix of any known XML tool tag.
|
||||
for _, tag := range xmlToolCallOpeningTags {
|
||||
tagWithLT := tag
|
||||
if !strings.HasPrefix(tagWithLT, "<") {
|
||||
tagWithLT = "<" + tagWithLT
|
||||
}
|
||||
if strings.HasPrefix(tagWithLT, lowerTail) {
|
||||
return lastLT
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
@@ -1,506 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestProcessToolSieveInterceptsXMLToolCallWithoutLeak feeds a chunked XML
// tool call through the sieve and asserts the markup never leaks as text
// while at least one tool call is extracted.
func TestProcessToolSieveInterceptsXMLToolCallWithoutLeak(t *testing.T) {
	var state toolStreamSieveState
	// Simulate a model producing XML tool call output chunk by chunk.
	chunks := []string{
		"<tool_calls>\n",
		" <tool_call>\n",
		" <tool_name>read_file</tool_name>\n",
		` <parameters>{"path":"README.MD"}</parameters>` + "\n",
		" </tool_call>\n",
		"</tool_calls>",
	}
	var events []toolStreamEvent
	for _, c := range chunks {
		events = append(events, processToolSieveChunk(&state, c, []string{"read_file"})...)
	}
	events = append(events, flushToolSieve(&state, []string{"read_file"})...)

	var textContent string
	var toolCalls int
	for _, evt := range events {
		if evt.Content != "" {
			textContent += evt.Content
		}
		toolCalls += len(evt.ToolCalls)
	}

	if strings.Contains(textContent, "<tool_call") {
		t.Fatalf("XML tool call content leaked to text: %q", textContent)
	}
	if strings.Contains(textContent, "read_file") {
		t.Fatalf("tool name leaked to text: %q", textContent)
	}
	if toolCalls == 0 {
		t.Fatal("expected tool calls to be extracted, got none")
	}
}
|
||||
|
||||
// TestProcessToolSieveHandlesLongXMLToolCall streams a 4 KiB CDATA payload
// split across chunks and asserts the sieve yields exactly one tool call with
// the payload intact and no leaked text.
func TestProcessToolSieveHandlesLongXMLToolCall(t *testing.T) {
	var state toolStreamSieveState
	const toolName = "write_to_file"
	payload := strings.Repeat("x", 4096)
	splitAt := len(payload) / 2
	chunks := []string{
		"<tool_calls>\n <tool_call>\n <tool_name>" + toolName + "</tool_name>\n <parameters>\n <content><![CDATA[",
		payload[:splitAt],
		payload[splitAt:],
		"]]></content>\n </parameters>\n </tool_call>\n</tool_calls>",
	}

	var events []toolStreamEvent
	for _, c := range chunks {
		events = append(events, processToolSieveChunk(&state, c, []string{toolName})...)
	}
	events = append(events, flushToolSieve(&state, []string{toolName})...)

	var textContent strings.Builder
	toolCalls := 0
	var gotPayload any
	for _, evt := range events {
		if evt.Content != "" {
			textContent.WriteString(evt.Content)
		}
		if len(evt.ToolCalls) > 0 && gotPayload == nil {
			gotPayload = evt.ToolCalls[0].Input["content"]
		}
		toolCalls += len(evt.ToolCalls)
	}

	if toolCalls != 1 {
		t.Fatalf("expected one long XML tool call, got %d events=%#v", toolCalls, events)
	}
	if textContent.Len() != 0 {
		t.Fatalf("expected no leaked text for long XML tool call, got %q", textContent.String())
	}
	got, _ := gotPayload.(string)
	if got != payload {
		t.Fatalf("expected long XML payload to survive intact, got len=%d want=%d", len(got), len(payload))
	}
}
|
||||
|
||||
// TestProcessToolSieveXMLWithLeadingText checks that prose preceding an XML
// tool call is emitted as text while the XML itself is intercepted.
func TestProcessToolSieveXMLWithLeadingText(t *testing.T) {
	var state toolStreamSieveState
	// Model outputs some prose then an XML tool call.
	chunks := []string{
		"Let me check the file.\n",
		"<tool_calls>\n <tool_call>\n <tool_name>read_file</tool_name>\n",
		` <parameters>{"path":"go.mod"}</parameters>` + "\n </tool_call>\n</tool_calls>",
	}
	var events []toolStreamEvent
	for _, c := range chunks {
		events = append(events, processToolSieveChunk(&state, c, []string{"read_file"})...)
	}
	events = append(events, flushToolSieve(&state, []string{"read_file"})...)

	var textContent string
	var toolCalls int
	for _, evt := range events {
		if evt.Content != "" {
			textContent += evt.Content
		}
		toolCalls += len(evt.ToolCalls)
	}

	// Leading text should be emitted.
	if !strings.Contains(textContent, "Let me check the file.") {
		t.Fatalf("expected leading text to be emitted, got %q", textContent)
	}
	// The XML itself should NOT leak.
	if strings.Contains(textContent, "<tool_call") {
		t.Fatalf("XML tool call content leaked to text: %q", textContent)
	}
	if toolCalls == 0 {
		t.Fatal("expected tool calls to be extracted, got none")
	}
}
|
||||
|
||||
// TestProcessToolSievePassesThroughNonToolXMLBlock checks that a <tool_call>
// block whose contents do not parse as a tool call is passed through as text.
func TestProcessToolSievePassesThroughNonToolXMLBlock(t *testing.T) {
	var state toolStreamSieveState
	chunk := `<tool_call><title>示例 XML</title><body>plain text xml payload</body></tool_call>`
	events := processToolSieveChunk(&state, chunk, []string{"read_file"})
	events = append(events, flushToolSieve(&state, []string{"read_file"})...)

	var textContent strings.Builder
	toolCalls := 0
	for _, evt := range events {
		textContent.WriteString(evt.Content)
		toolCalls += len(evt.ToolCalls)
	}
	if toolCalls != 0 {
		t.Fatalf("expected no tool calls for plain XML payload, got %d events=%#v", toolCalls, events)
	}
	if textContent.String() != chunk {
		t.Fatalf("expected XML payload to pass through unchanged, got %q", textContent.String())
	}
}
|
||||
|
||||
// TestProcessToolSieveNonToolXMLKeepsSuffixForToolParsing checks that after a
// non-tool XML block is passed through, a following <invoke> block is still
// parsed into exactly one tool call.
func TestProcessToolSieveNonToolXMLKeepsSuffixForToolParsing(t *testing.T) {
	var state toolStreamSieveState
	chunk := `<tool_call><title>plain xml</title></tool_call><invoke name="read_file"><parameters>{"path":"README.MD"}</parameters></invoke>`
	events := processToolSieveChunk(&state, chunk, []string{"read_file"})
	events = append(events, flushToolSieve(&state, []string{"read_file"})...)

	var textContent strings.Builder
	toolCalls := 0
	for _, evt := range events {
		textContent.WriteString(evt.Content)
		toolCalls += len(evt.ToolCalls)
	}
	if !strings.Contains(textContent.String(), `<tool_call><title>plain xml</title></tool_call>`) {
		t.Fatalf("expected leading non-tool XML to be preserved, got %q", textContent.String())
	}
	if strings.Contains(textContent.String(), `<invoke name="read_file">`) {
		t.Fatalf("expected invoke tool XML to be intercepted, got %q", textContent.String())
	}
	if toolCalls != 1 {
		t.Fatalf("expected exactly one parsed tool call from suffix, got %d events=%#v", toolCalls, events)
	}
}
|
||||
|
||||
// TestProcessToolSievePassesThroughMalformedExecutableXMLBlock checks that a
// tool-call-shaped block missing its tool name stays plain text.
func TestProcessToolSievePassesThroughMalformedExecutableXMLBlock(t *testing.T) {
	var state toolStreamSieveState
	chunk := `<tool_call><parameters>{"path":"README.md"}</parameters></tool_call>`
	events := processToolSieveChunk(&state, chunk, []string{"read_file"})
	events = append(events, flushToolSieve(&state, []string{"read_file"})...)

	var textContent strings.Builder
	toolCalls := 0
	for _, evt := range events {
		textContent.WriteString(evt.Content)
		toolCalls += len(evt.ToolCalls)
	}

	if toolCalls != 0 {
		t.Fatalf("expected malformed executable-looking XML to stay text, got %d events=%#v", toolCalls, events)
	}
	if textContent.String() != chunk {
		t.Fatalf("expected malformed executable-looking XML to pass through unchanged, got %q", textContent.String())
	}
}
|
||||
|
||||
// TestProcessToolSievePassesThroughFencedXMLToolCallExamples checks that XML
// tool-call examples inside Markdown code fences are never executed and reach
// the client byte-for-byte.
func TestProcessToolSievePassesThroughFencedXMLToolCallExamples(t *testing.T) {
	var state toolStreamSieveState
	input := strings.Join([]string{
		"Before first example.\n```",
		"xml\n<tool_call><tool_name>read_file</tool_name><parameters>{\"path\":\"README.md\"}</parameters></tool_call>\n```\n",
		"Between examples.\n```xml\n",
		"<tool_call><tool_name>search</tool_name><parameters>{\"q\":\"golang\"}</parameters></tool_call>\n",
		"```\nAfter examples.",
	}, "")

	chunks := []string{
		"Before first example.\n```",
		"xml\n<tool_call><tool_name>read_file</tool_name><parameters>{\"path\":\"README.md\"}</parameters></tool_call>\n```\n",
		"Between examples.\n```xml\n",
		"<tool_call><tool_name>search</tool_name><parameters>{\"q\":\"golang\"}</parameters></tool_call>\n",
		"```\nAfter examples.",
	}

	var events []toolStreamEvent
	for _, c := range chunks {
		events = append(events, processToolSieveChunk(&state, c, []string{"read_file", "search"})...)
	}
	events = append(events, flushToolSieve(&state, []string{"read_file", "search"})...)

	var textContent strings.Builder
	toolCalls := 0
	for _, evt := range events {
		if evt.Content != "" {
			textContent.WriteString(evt.Content)
		}
		toolCalls += len(evt.ToolCalls)
	}

	if toolCalls != 0 {
		t.Fatalf("expected fenced XML examples to stay text, got %d tool calls events=%#v", toolCalls, events)
	}
	if textContent.String() != input {
		t.Fatalf("expected fenced XML examples to pass through unchanged, got %q", textContent.String())
	}
}
|
||||
|
||||
// TestProcessToolSieveKeepsPartialXMLTagInsideFencedExample checks that a tool
// tag split across chunk boundaries inside a code fence still passes through
// unchanged instead of being held or executed.
func TestProcessToolSieveKeepsPartialXMLTagInsideFencedExample(t *testing.T) {
	var state toolStreamSieveState
	input := strings.Join([]string{
		"Example:\n```xml\n<tool_ca",
		"ll><tool_name>read_file</tool_name><parameters>{\"path\":\"README.md\"}</parameters></tool_call>\n```\n",
		"Done.",
	}, "")

	chunks := []string{
		"Example:\n```xml\n<tool_ca",
		"ll><tool_name>read_file</tool_name><parameters>{\"path\":\"README.md\"}</parameters></tool_call>\n```\n",
		"Done.",
	}

	var events []toolStreamEvent
	for _, c := range chunks {
		events = append(events, processToolSieveChunk(&state, c, []string{"read_file"})...)
	}
	events = append(events, flushToolSieve(&state, []string{"read_file"})...)

	var textContent strings.Builder
	toolCalls := 0
	for _, evt := range events {
		if evt.Content != "" {
			textContent.WriteString(evt.Content)
		}
		toolCalls += len(evt.ToolCalls)
	}

	if toolCalls != 0 {
		t.Fatalf("expected partial fenced XML to stay text, got %d tool calls events=%#v", toolCalls, events)
	}
	if textContent.String() != input {
		t.Fatalf("expected partial fenced XML to pass through unchanged, got %q", textContent.String())
	}
}
|
||||
|
||||
func TestProcessToolSievePartialXMLTagHeldBack(t *testing.T) {
|
||||
var state toolStreamSieveState
|
||||
// Chunk ends with a partial XML tool tag.
|
||||
events := processToolSieveChunk(&state, "Hello <tool_ca", []string{"read_file"})
|
||||
|
||||
var textContent string
|
||||
for _, evt := range events {
|
||||
textContent += evt.Content
|
||||
}
|
||||
|
||||
// "Hello " should be emitted, but "<tool_ca" should be held back.
|
||||
if strings.Contains(textContent, "<tool_ca") {
|
||||
t.Fatalf("partial XML tag should not be emitted, got %q", textContent)
|
||||
}
|
||||
if !strings.Contains(textContent, "Hello") {
|
||||
t.Fatalf("expected 'Hello' text to be emitted, got %q", textContent)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindToolSegmentStartDetectsXMLToolCalls(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
input string
|
||||
want int
|
||||
}{
|
||||
{"tool_calls_tag", "some text <tool_calls>\n", 10},
|
||||
{"tool_call_tag", "prefix <tool_call>\n", 7},
|
||||
{"invoke_tag", "text <invoke name=\"foo\">body</invoke>", 5},
|
||||
{"xml_inside_code_fence", "```xml\n<tool_call><tool_name>read_file</tool_name></tool_call>\n```", -1},
|
||||
{"function_call_tag", "<function_call name=\"foo\">body</function_call>", 0},
|
||||
{"no_xml", "just plain text", -1},
|
||||
{"gemini_json_no_detect", `some text {"functionCall":{"name":"search"}}`, -1},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := findToolSegmentStart(nil, tc.input)
|
||||
if got != tc.want {
|
||||
t.Fatalf("findToolSegmentStart(%q) = %d, want %d", tc.input, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindPartialXMLToolTagStart(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
input string
|
||||
want int
|
||||
}{
|
||||
{"partial_tool_call", "Hello <tool_ca", 6},
|
||||
{"partial_invoke", "Prefix <inv", 7},
|
||||
{"partial_lt_only", "Text <", 5},
|
||||
{"complete_tag", "Text <tool_call>done", -1},
|
||||
{"no_lt", "plain text", -1},
|
||||
{"closed_lt", "a < b > c", -1},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := findPartialXMLToolTagStart(tc.input)
|
||||
if got != tc.want {
|
||||
t.Fatalf("findPartialXMLToolTagStart(%q) = %d, want %d", tc.input, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasOpenXMLToolTag(t *testing.T) {
|
||||
if !hasOpenXMLToolTag("<tool_call>\n<tool_name>foo</tool_name>") {
|
||||
t.Fatal("should detect open XML tool tag without closing tag")
|
||||
}
|
||||
if hasOpenXMLToolTag("<tool_call>\n<tool_name>foo</tool_name></tool_call>") {
|
||||
t.Fatal("should return false when closing tag is present")
|
||||
}
|
||||
if hasOpenXMLToolTag("plain text without any XML") {
|
||||
t.Fatal("should return false for plain text")
|
||||
}
|
||||
}
|
||||
|
||||
// Test the EXACT scenario the user reports: token-by-token streaming where
|
||||
// <tool_calls> tag arrives in small pieces.
|
||||
func TestProcessToolSieveTokenByTokenXMLNoLeak(t *testing.T) {
|
||||
var state toolStreamSieveState
|
||||
// Simulate DeepSeek model generating tokens one at a time.
|
||||
chunks := []string{
|
||||
"<",
|
||||
"tool",
|
||||
"_calls",
|
||||
">\n",
|
||||
" <",
|
||||
"tool",
|
||||
"_call",
|
||||
">\n",
|
||||
" <",
|
||||
"tool",
|
||||
"_name",
|
||||
">",
|
||||
"read",
|
||||
"_file",
|
||||
"</",
|
||||
"tool",
|
||||
"_name",
|
||||
">\n",
|
||||
" <",
|
||||
"parameters",
|
||||
">",
|
||||
`{"path"`,
|
||||
`: "README.MD"`,
|
||||
`}`,
|
||||
"</",
|
||||
"parameters",
|
||||
">\n",
|
||||
" </",
|
||||
"tool",
|
||||
"_call",
|
||||
">\n",
|
||||
"</",
|
||||
"tool",
|
||||
"_calls",
|
||||
">",
|
||||
}
|
||||
var events []toolStreamEvent
|
||||
for _, c := range chunks {
|
||||
events = append(events, processToolSieveChunk(&state, c, []string{"read_file"})...)
|
||||
}
|
||||
events = append(events, flushToolSieve(&state, []string{"read_file"})...)
|
||||
|
||||
var textContent string
|
||||
var toolCalls int
|
||||
for _, evt := range events {
|
||||
if evt.Content != "" {
|
||||
textContent += evt.Content
|
||||
}
|
||||
toolCalls += len(evt.ToolCalls)
|
||||
}
|
||||
|
||||
if strings.Contains(textContent, "<tool_call") {
|
||||
t.Fatalf("XML tool call content leaked to text in token-by-token mode: %q", textContent)
|
||||
}
|
||||
if strings.Contains(textContent, "tool_calls>") {
|
||||
t.Fatalf("closing tag fragment leaked to text: %q", textContent)
|
||||
}
|
||||
if strings.Contains(textContent, "read_file") {
|
||||
t.Fatalf("tool name leaked to text: %q", textContent)
|
||||
}
|
||||
if toolCalls == 0 {
|
||||
t.Fatal("expected tool calls to be extracted, got none")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that flushToolSieve on incomplete XML falls back to raw text.
|
||||
func TestFlushToolSieveIncompleteXMLFallsBackToText(t *testing.T) {
|
||||
var state toolStreamSieveState
|
||||
// XML block starts but stream ends before completion.
|
||||
chunks := []string{
|
||||
"<tool_calls>\n",
|
||||
" <tool_call>\n",
|
||||
" <tool_name>read_file</tool_name>\n",
|
||||
}
|
||||
var events []toolStreamEvent
|
||||
for _, c := range chunks {
|
||||
events = append(events, processToolSieveChunk(&state, c, []string{"read_file"})...)
|
||||
}
|
||||
// Stream ends abruptly - flush should NOT dump raw XML.
|
||||
events = append(events, flushToolSieve(&state, []string{"read_file"})...)
|
||||
|
||||
var textContent string
|
||||
for _, evt := range events {
|
||||
if evt.Content != "" {
|
||||
textContent += evt.Content
|
||||
}
|
||||
}
|
||||
|
||||
if textContent != strings.Join(chunks, "") {
|
||||
t.Fatalf("expected incomplete XML to fall back to raw text, got %q", textContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that the opening tag "<tool_calls>\n " is NOT emitted as text content.
|
||||
func TestOpeningXMLTagNotLeakedAsContent(t *testing.T) {
|
||||
var state toolStreamSieveState
|
||||
// First chunk is the opening tag - should be held, not emitted.
|
||||
evts1 := processToolSieveChunk(&state, "<tool_calls>\n ", []string{"read_file"})
|
||||
for _, evt := range evts1 {
|
||||
if strings.Contains(evt.Content, "<tool_calls>") {
|
||||
t.Fatalf("opening tag leaked on first chunk: %q", evt.Content)
|
||||
}
|
||||
}
|
||||
|
||||
// Remaining content arrives.
|
||||
evts2 := processToolSieveChunk(&state, "<tool_call>\n <tool_name>read_file</tool_name>\n <parameters>{\"path\":\"README.MD\"}</parameters>\n </tool_call>\n</tool_calls>", []string{"read_file"})
|
||||
evts2 = append(evts2, flushToolSieve(&state, []string{"read_file"})...)
|
||||
|
||||
var textContent string
|
||||
var toolCalls int
|
||||
allEvents := append(evts1, evts2...)
|
||||
for _, evt := range allEvents {
|
||||
if evt.Content != "" {
|
||||
textContent += evt.Content
|
||||
}
|
||||
toolCalls += len(evt.ToolCalls)
|
||||
}
|
||||
|
||||
if strings.Contains(textContent, "<tool_call") {
|
||||
t.Fatalf("XML content leaked: %q", textContent)
|
||||
}
|
||||
if toolCalls == 0 {
|
||||
t.Fatal("expected tool calls to be extracted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessToolSieveFallsBackToRawAttemptCompletion(t *testing.T) {
|
||||
var state toolStreamSieveState
|
||||
// Simulate an agent outputting attempt_completion XML tag.
|
||||
// If it does not parse as a tool call, it should fall back to raw text.
|
||||
chunks := []string{
|
||||
"Done with task.\n",
|
||||
"<attempt_completion>\n",
|
||||
" <result>Here is the answer</result>\n",
|
||||
"</attempt_completion>",
|
||||
}
|
||||
var events []toolStreamEvent
|
||||
for _, c := range chunks {
|
||||
events = append(events, processToolSieveChunk(&state, c, []string{"attempt_completion"})...)
|
||||
}
|
||||
events = append(events, flushToolSieve(&state, []string{"attempt_completion"})...)
|
||||
|
||||
var textContent string
|
||||
for _, evt := range events {
|
||||
if evt.Content != "" {
|
||||
textContent += evt.Content
|
||||
}
|
||||
}
|
||||
|
||||
if !strings.Contains(textContent, "Done with task.\n") {
|
||||
t.Fatalf("expected leading text to be emitted, got %q", textContent)
|
||||
}
|
||||
|
||||
if textContent != strings.Join(chunks, "") {
|
||||
t.Fatalf("expected agent XML to fall back to raw text, got %q", textContent)
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package openai
|
||||
|
||||
import "net/http"
|
||||
|
||||
// shouldWriteUpstreamEmptyOutputError reports whether the upstream turn
// produced no visible text at all, in which case an error response should be
// written instead of an empty completion.
func shouldWriteUpstreamEmptyOutputError(text string) bool {
	return len(text) == 0
}
|
||||
|
||||
// upstreamEmptyOutputDetail maps an empty upstream response to an HTTP
// status, a user-facing message, and a machine-readable error code.
// Content filtering takes precedence; otherwise a reasoning-only response
// (thinking but no text) is distinguished from a fully empty one. Both
// non-filter cases use 429 so clients treat the condition as retryable.
func upstreamEmptyOutputDetail(contentFilter bool, text, thinking string) (int, string, string) {
	_ = text // unused; retained for signature symmetry with callers
	switch {
	case contentFilter:
		return http.StatusBadRequest, "Upstream content filtered the response and returned no output.", "content_filter"
	case thinking != "":
		return http.StatusTooManyRequests, "Upstream model returned reasoning without visible output.", "upstream_empty_output"
	default:
		return http.StatusTooManyRequests, "Upstream model returned empty output.", "upstream_empty_output"
	}
}
|
||||
|
||||
func writeUpstreamEmptyOutputError(w http.ResponseWriter, text string, contentFilter bool) bool {
|
||||
if !shouldWriteUpstreamEmptyOutputError(text) {
|
||||
return false
|
||||
}
|
||||
status, message, code := upstreamEmptyOutputDetail(contentFilter, text, "")
|
||||
writeOpenAIErrorWithCode(w, status, message, code)
|
||||
return true
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"ds2api/internal/auth"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestIsVercelStreamPrepareRequest(t *testing.T) {
|
||||
req := httptest.NewRequest("POST", "/v1/chat/completions?__stream_prepare=1", nil)
|
||||
if !isVercelStreamPrepareRequest(req) {
|
||||
t.Fatalf("expected prepare request to be detected")
|
||||
}
|
||||
|
||||
req2 := httptest.NewRequest("POST", "/v1/chat/completions", nil)
|
||||
if isVercelStreamPrepareRequest(req2) {
|
||||
t.Fatalf("expected non-prepare request")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsVercelStreamReleaseRequest(t *testing.T) {
|
||||
req := httptest.NewRequest("POST", "/v1/chat/completions?__stream_release=1", nil)
|
||||
if !isVercelStreamReleaseRequest(req) {
|
||||
t.Fatalf("expected release request to be detected")
|
||||
}
|
||||
|
||||
req2 := httptest.NewRequest("POST", "/v1/chat/completions", nil)
|
||||
if isVercelStreamReleaseRequest(req2) {
|
||||
t.Fatalf("expected non-release request")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVercelInternalSecret(t *testing.T) {
|
||||
t.Run("prefer explicit secret", func(t *testing.T) {
|
||||
t.Setenv("DS2API_VERCEL_INTERNAL_SECRET", "stream-secret")
|
||||
t.Setenv("DS2API_ADMIN_KEY", "admin-fallback")
|
||||
if got := vercelInternalSecret(); got != "stream-secret" {
|
||||
t.Fatalf("expected explicit secret, got %q", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("fallback to admin key", func(t *testing.T) {
|
||||
t.Setenv("DS2API_VERCEL_INTERNAL_SECRET", "")
|
||||
t.Setenv("DS2API_ADMIN_KEY", "admin-fallback")
|
||||
if got := vercelInternalSecret(); got != "admin-fallback" {
|
||||
t.Fatalf("expected admin key fallback, got %q", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("default admin when env missing", func(t *testing.T) {
|
||||
t.Setenv("DS2API_VERCEL_INTERNAL_SECRET", "")
|
||||
t.Setenv("DS2API_ADMIN_KEY", "")
|
||||
if got := vercelInternalSecret(); got != "admin" {
|
||||
t.Fatalf("expected default admin fallback, got %q", got)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestStreamLeaseLifecycle(t *testing.T) {
|
||||
h := &Handler{}
|
||||
leaseID := h.holdStreamLease(&auth.RequestAuth{UseConfigToken: false})
|
||||
if leaseID == "" {
|
||||
t.Fatalf("expected non-empty lease id")
|
||||
}
|
||||
if ok := h.releaseStreamLease(leaseID); !ok {
|
||||
t.Fatalf("expected lease release success")
|
||||
}
|
||||
if ok := h.releaseStreamLease(leaseID); ok {
|
||||
t.Fatalf("expected duplicate release to fail")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamLeaseTTL(t *testing.T) {
|
||||
t.Setenv("DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS", "120")
|
||||
if got := streamLeaseTTL(); got != 120*time.Second {
|
||||
t.Fatalf("expected ttl=120s, got %v", got)
|
||||
}
|
||||
t.Setenv("DS2API_VERCEL_STREAM_LEASE_TTL_SECONDS", "invalid")
|
||||
if got := streamLeaseTTL(); got != 15*time.Minute {
|
||||
t.Fatalf("expected default ttl on invalid value, got %v", got)
|
||||
}
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"ds2api/internal/chathistory"
|
||||
)
|
||||
|
||||
type Handler struct {
|
||||
Store ConfigStore
|
||||
Pool PoolController
|
||||
DS DeepSeekCaller
|
||||
OpenAI OpenAIChatCaller
|
||||
ChatHistory *chathistory.Store
|
||||
}
|
||||
|
||||
func RegisterRoutes(r chi.Router, h *Handler) {
|
||||
r.Post("/login", h.login)
|
||||
r.Get("/verify", h.verify)
|
||||
r.Group(func(pr chi.Router) {
|
||||
pr.Use(h.requireAdmin)
|
||||
pr.Get("/vercel/config", h.getVercelConfig)
|
||||
pr.Get("/config", h.getConfig)
|
||||
pr.Post("/config", h.updateConfig)
|
||||
pr.Get("/settings", h.getSettings)
|
||||
pr.Put("/settings", h.updateSettings)
|
||||
pr.Post("/settings/password", h.updateSettingsPassword)
|
||||
pr.Post("/config/import", h.configImport)
|
||||
pr.Get("/config/export", h.configExport)
|
||||
pr.Post("/keys", h.addKey)
|
||||
pr.Put("/keys/{key}", h.updateKey)
|
||||
pr.Delete("/keys/{key}", h.deleteKey)
|
||||
pr.Get("/proxies", h.listProxies)
|
||||
pr.Post("/proxies", h.addProxy)
|
||||
pr.Put("/proxies/{proxyID}", h.updateProxy)
|
||||
pr.Delete("/proxies/{proxyID}", h.deleteProxy)
|
||||
pr.Post("/proxies/test", h.testProxy)
|
||||
pr.Get("/accounts", h.listAccounts)
|
||||
pr.Post("/accounts", h.addAccount)
|
||||
pr.Put("/accounts/{identifier}", h.updateAccount)
|
||||
pr.Delete("/accounts/{identifier}", h.deleteAccount)
|
||||
pr.Put("/accounts/{identifier}/proxy", h.updateAccountProxy)
|
||||
pr.Get("/queue/status", h.queueStatus)
|
||||
pr.Post("/accounts/test", h.testSingleAccount)
|
||||
pr.Post("/accounts/test-all", h.testAllAccounts)
|
||||
pr.Post("/accounts/sessions/delete-all", h.deleteAllSessions)
|
||||
pr.Post("/import", h.batchImport)
|
||||
pr.Post("/test", h.testAPI)
|
||||
pr.Post("/dev/raw-samples/capture", h.captureRawSample)
|
||||
pr.Get("/dev/raw-samples/query", h.queryRawSampleCaptures)
|
||||
pr.Post("/dev/raw-samples/save", h.saveRawSampleFromCaptures)
|
||||
pr.Post("/vercel/sync", h.syncVercel)
|
||||
pr.Get("/vercel/status", h.vercelStatus)
|
||||
pr.Post("/vercel/status", h.vercelStatus)
|
||||
pr.Get("/export", h.exportConfig)
|
||||
pr.Get("/dev/captures", h.getDevCaptures)
|
||||
pr.Delete("/dev/captures", h.clearDevCaptures)
|
||||
pr.Get("/chat-history", h.getChatHistory)
|
||||
pr.Get("/chat-history/{id}", h.getChatHistoryItem)
|
||||
pr.Delete("/chat-history", h.clearChatHistory)
|
||||
pr.Delete("/chat-history/{id}", h.deleteChatHistoryItem)
|
||||
pr.Put("/chat-history/settings", h.updateChatHistorySettings)
|
||||
pr.Get("/version", h.getVersion)
|
||||
})
|
||||
}
|
||||
64
internal/assistantturn/stream.go
Normal file
64
internal/assistantturn/stream.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package assistantturn
|
||||
|
||||
import (
|
||||
"ds2api/internal/httpapi/openai/shared"
|
||||
"ds2api/internal/sse"
|
||||
)
|
||||
|
||||
type StreamEventType string
|
||||
|
||||
const (
|
||||
StreamEventTextDelta StreamEventType = "text_delta"
|
||||
StreamEventThinkingDelta StreamEventType = "thinking_delta"
|
||||
StreamEventToolCall StreamEventType = "tool_call"
|
||||
StreamEventDone StreamEventType = "done"
|
||||
StreamEventError StreamEventType = "error"
|
||||
StreamEventPing StreamEventType = "ping"
|
||||
)
|
||||
|
||||
type StreamEvent struct {
|
||||
Type StreamEventType
|
||||
Text string
|
||||
Thinking string
|
||||
ToolCall any
|
||||
Error *OutputError
|
||||
Usage *Usage
|
||||
}
|
||||
|
||||
type Accumulator struct {
|
||||
inner shared.StreamAccumulator
|
||||
}
|
||||
|
||||
type AccumulatorOptions struct {
|
||||
ThinkingEnabled bool
|
||||
SearchEnabled bool
|
||||
StripReferenceMarkers bool
|
||||
}
|
||||
|
||||
func NewAccumulator(opts AccumulatorOptions) *Accumulator {
|
||||
return &Accumulator{
|
||||
inner: shared.StreamAccumulator{
|
||||
ThinkingEnabled: opts.ThinkingEnabled,
|
||||
SearchEnabled: opts.SearchEnabled,
|
||||
StripReferenceMarkers: opts.StripReferenceMarkers,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Accumulator) Apply(parsed sse.LineResult) shared.StreamAccumulatorResult {
|
||||
if a == nil {
|
||||
return shared.StreamAccumulatorResult{}
|
||||
}
|
||||
return a.inner.Apply(parsed)
|
||||
}
|
||||
|
||||
func (a *Accumulator) Snapshot() (rawText, text, rawThinking, thinking, detectionThinking string) {
|
||||
if a == nil {
|
||||
return "", "", "", "", ""
|
||||
}
|
||||
return a.inner.RawText.String(),
|
||||
a.inner.Text.String(),
|
||||
a.inner.RawThinking.String(),
|
||||
a.inner.Thinking.String(),
|
||||
a.inner.ToolDetectionThinking.String()
|
||||
}
|
||||
285
internal/assistantturn/turn.go
Normal file
285
internal/assistantturn/turn.go
Normal file
@@ -0,0 +1,285 @@
|
||||
package assistantturn
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"ds2api/internal/httpapi/openai/shared"
|
||||
"ds2api/internal/promptcompat"
|
||||
"ds2api/internal/sse"
|
||||
"ds2api/internal/toolcall"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
type StopReason string
|
||||
|
||||
const (
|
||||
StopReasonStop StopReason = "stop"
|
||||
StopReasonToolCalls StopReason = "tool_calls"
|
||||
StopReasonContentFilter StopReason = "content_filter"
|
||||
StopReasonError StopReason = "error"
|
||||
)
|
||||
|
||||
type Usage struct {
|
||||
InputTokens int
|
||||
OutputTokens int
|
||||
ReasoningTokens int
|
||||
TotalTokens int
|
||||
}
|
||||
|
||||
type OutputError struct {
|
||||
Status int
|
||||
Message string
|
||||
Code string
|
||||
}
|
||||
|
||||
type Turn struct {
|
||||
Model string
|
||||
Prompt string
|
||||
RawText string
|
||||
RawThinking string
|
||||
DetectionThinking string
|
||||
Text string
|
||||
Thinking string
|
||||
ToolCalls []toolcall.ParsedToolCall
|
||||
ParsedToolCalls toolcall.ToolCallParseResult
|
||||
CitationLinks map[int]string
|
||||
ContentFilter bool
|
||||
ResponseMessageID int
|
||||
StopReason StopReason
|
||||
Usage Usage
|
||||
Error *OutputError
|
||||
}
|
||||
|
||||
type FinalizeOptions struct {
|
||||
AlreadyEmittedToolCalls bool
|
||||
}
|
||||
|
||||
type FinalOutcome struct {
|
||||
FinishReason string
|
||||
Error *OutputError
|
||||
Usage Usage
|
||||
HasToolCalls bool
|
||||
HasVisibleText bool
|
||||
HasVisibleOutput bool
|
||||
ShouldFail bool
|
||||
}
|
||||
|
||||
type BuildOptions struct {
|
||||
Model string
|
||||
Prompt string
|
||||
RefFileTokens int
|
||||
SearchEnabled bool
|
||||
StripReferenceMarkers bool
|
||||
ToolNames []string
|
||||
ToolsRaw any
|
||||
ToolChoice promptcompat.ToolChoicePolicy
|
||||
}
|
||||
|
||||
type StreamSnapshot struct {
|
||||
RawText string
|
||||
VisibleText string
|
||||
RawThinking string
|
||||
VisibleThinking string
|
||||
DetectionThinking string
|
||||
ContentFilter bool
|
||||
CitationLinks map[int]string
|
||||
ResponseMessageID int
|
||||
AlreadyEmittedCalls bool
|
||||
AdditionalToolCalls []toolcall.ParsedToolCall
|
||||
AlreadyEmittedToolRaw bool
|
||||
}
|
||||
|
||||
func BuildTurnFromCollected(result sse.CollectResult, opts BuildOptions) Turn {
|
||||
thinking := shared.CleanVisibleOutput(result.Thinking, opts.StripReferenceMarkers)
|
||||
text := shared.CleanVisibleOutput(result.Text, opts.StripReferenceMarkers)
|
||||
if opts.SearchEnabled {
|
||||
text = shared.ReplaceCitationMarkersWithLinks(text, result.CitationLinks)
|
||||
}
|
||||
|
||||
parsed := shared.DetectAssistantToolCalls(result.Text, text, result.Thinking, result.ToolDetectionThinking, opts.ToolNames)
|
||||
calls := toolcall.NormalizeParsedToolCallsForSchemas(parsed.Calls, opts.ToolsRaw)
|
||||
parsed.Calls = calls
|
||||
|
||||
stopReason := StopReasonStop
|
||||
if result.ContentFilter {
|
||||
stopReason = StopReasonContentFilter
|
||||
}
|
||||
if len(calls) > 0 {
|
||||
stopReason = StopReasonToolCalls
|
||||
}
|
||||
|
||||
turn := Turn{
|
||||
Model: opts.Model,
|
||||
Prompt: opts.Prompt,
|
||||
RawText: result.Text,
|
||||
RawThinking: result.Thinking,
|
||||
DetectionThinking: result.ToolDetectionThinking,
|
||||
Text: text,
|
||||
Thinking: thinking,
|
||||
ToolCalls: calls,
|
||||
ParsedToolCalls: parsed,
|
||||
CitationLinks: result.CitationLinks,
|
||||
ContentFilter: result.ContentFilter,
|
||||
ResponseMessageID: result.ResponseMessageID,
|
||||
StopReason: stopReason,
|
||||
}
|
||||
turn.Usage = BuildUsage(opts.Model, opts.Prompt, thinking, text, opts.RefFileTokens)
|
||||
turn.Error = ValidateTurn(turn, opts.ToolChoice)
|
||||
if turn.Error != nil {
|
||||
turn.StopReason = StopReasonError
|
||||
}
|
||||
return turn
|
||||
}
|
||||
|
||||
func BuildTurnFromStreamSnapshot(snapshot StreamSnapshot, opts BuildOptions) Turn {
|
||||
thinking := shared.CleanVisibleOutput(snapshot.VisibleThinking, opts.StripReferenceMarkers)
|
||||
text := shared.CleanVisibleOutput(snapshot.VisibleText, opts.StripReferenceMarkers)
|
||||
if opts.SearchEnabled {
|
||||
text = shared.ReplaceCitationMarkersWithLinks(text, snapshot.CitationLinks)
|
||||
}
|
||||
|
||||
parsed := shared.DetectAssistantToolCalls(snapshot.RawText, text, snapshot.RawThinking, snapshot.DetectionThinking, opts.ToolNames)
|
||||
calls := parsed.Calls
|
||||
if len(calls) == 0 && len(snapshot.AdditionalToolCalls) > 0 {
|
||||
calls = snapshot.AdditionalToolCalls
|
||||
}
|
||||
calls = toolcall.NormalizeParsedToolCallsForSchemas(calls, opts.ToolsRaw)
|
||||
parsed.Calls = calls
|
||||
|
||||
stopReason := StopReasonStop
|
||||
if snapshot.ContentFilter {
|
||||
stopReason = StopReasonContentFilter
|
||||
}
|
||||
if len(calls) > 0 || snapshot.AlreadyEmittedCalls || snapshot.AlreadyEmittedToolRaw {
|
||||
stopReason = StopReasonToolCalls
|
||||
}
|
||||
|
||||
turn := Turn{
|
||||
Model: opts.Model,
|
||||
Prompt: opts.Prompt,
|
||||
RawText: snapshot.RawText,
|
||||
RawThinking: snapshot.RawThinking,
|
||||
DetectionThinking: snapshot.DetectionThinking,
|
||||
Text: text,
|
||||
Thinking: thinking,
|
||||
ToolCalls: calls,
|
||||
ParsedToolCalls: parsed,
|
||||
CitationLinks: snapshot.CitationLinks,
|
||||
ContentFilter: snapshot.ContentFilter,
|
||||
ResponseMessageID: snapshot.ResponseMessageID,
|
||||
StopReason: stopReason,
|
||||
}
|
||||
turn.Usage = BuildUsage(opts.Model, opts.Prompt, thinking, text, opts.RefFileTokens)
|
||||
if !snapshot.AlreadyEmittedCalls && !snapshot.AlreadyEmittedToolRaw {
|
||||
turn.Error = ValidateTurn(turn, opts.ToolChoice)
|
||||
}
|
||||
if turn.Error != nil && len(calls) == 0 {
|
||||
turn.StopReason = StopReasonError
|
||||
}
|
||||
return turn
|
||||
}
|
||||
|
||||
func BuildUsage(model, prompt, thinking, text string, refFileTokens int) Usage {
|
||||
inputTokens := util.CountPromptTokens(prompt, model) + refFileTokens
|
||||
reasoningTokens := util.CountOutputTokens(thinking, model)
|
||||
outputTokens := reasoningTokens + util.CountOutputTokens(text, model)
|
||||
return Usage{
|
||||
InputTokens: inputTokens,
|
||||
OutputTokens: outputTokens,
|
||||
ReasoningTokens: reasoningTokens,
|
||||
TotalTokens: inputTokens + outputTokens,
|
||||
}
|
||||
}
|
||||
|
||||
func ValidateTurn(turn Turn, policy promptcompat.ToolChoicePolicy) *OutputError {
|
||||
if policy.IsRequired() && len(turn.ToolCalls) == 0 {
|
||||
return &OutputError{
|
||||
Status: http.StatusUnprocessableEntity,
|
||||
Message: "tool_choice requires at least one valid tool call.",
|
||||
Code: "tool_choice_violation",
|
||||
}
|
||||
}
|
||||
if len(turn.ToolCalls) > 0 {
|
||||
return nil
|
||||
}
|
||||
if strings.TrimSpace(turn.Text) != "" {
|
||||
return nil
|
||||
}
|
||||
status, message, code := UpstreamEmptyOutputDetail(turn.ContentFilter, turn.Text, turn.Thinking)
|
||||
return &OutputError{Status: status, Message: message, Code: code}
|
||||
}
|
||||
|
||||
// UpstreamEmptyOutputDetail maps an empty upstream turn to an HTTP status,
// user-facing message, and machine-readable error code. Content filtering
// takes precedence; otherwise a reasoning-only turn (non-blank thinking) is
// distinguished from a fully empty one. Non-filter cases use 429 so clients
// treat the condition as retryable.
func UpstreamEmptyOutputDetail(contentFilter bool, text, thinking string) (int, string, string) {
	_ = text // unused; retained for signature symmetry with callers
	switch {
	case contentFilter:
		return http.StatusBadRequest, "Upstream content filtered the response and returned no output.", "content_filter"
	case strings.TrimSpace(thinking) != "":
		return http.StatusTooManyRequests, "Upstream account hit a rate limit and returned reasoning without visible output.", "upstream_empty_output"
	default:
		return http.StatusTooManyRequests, "Upstream account hit a rate limit and returned empty output.", "upstream_empty_output"
	}
}
|
||||
|
||||
// ShouldRetryEmptyOutput returns true when the turn produced no visible text
|
||||
// and has no tool calls or content filter. This includes thinking-only responses,
|
||||
// where the model returned reasoning but no answer — a retry may yield text.
|
||||
func ShouldRetryEmptyOutput(turn Turn, attempts, maxAttempts int) bool {
|
||||
return attempts < maxAttempts &&
|
||||
!turn.ContentFilter &&
|
||||
len(turn.ToolCalls) == 0 &&
|
||||
strings.TrimSpace(turn.Text) == ""
|
||||
}
|
||||
|
||||
func FinalizeTurn(turn Turn, opts FinalizeOptions) FinalOutcome {
|
||||
hasToolCalls := len(turn.ToolCalls) > 0 || opts.AlreadyEmittedToolCalls
|
||||
hasVisibleText := strings.TrimSpace(turn.Text) != ""
|
||||
hasVisibleThinking := strings.TrimSpace(turn.Thinking) != ""
|
||||
err := turn.Error
|
||||
if hasToolCalls {
|
||||
err = nil
|
||||
}
|
||||
finishReason := FinishReason(turn)
|
||||
if hasToolCalls {
|
||||
finishReason = "tool_calls"
|
||||
}
|
||||
return FinalOutcome{
|
||||
FinishReason: finishReason,
|
||||
Error: err,
|
||||
Usage: turn.Usage,
|
||||
HasToolCalls: hasToolCalls,
|
||||
HasVisibleText: hasVisibleText,
|
||||
HasVisibleOutput: hasVisibleText || hasVisibleThinking || hasToolCalls,
|
||||
ShouldFail: err != nil,
|
||||
}
|
||||
}
|
||||
|
||||
func OpenAIChatUsage(turn Turn) map[string]any {
|
||||
return map[string]any{
|
||||
"prompt_tokens": turn.Usage.InputTokens,
|
||||
"completion_tokens": turn.Usage.OutputTokens,
|
||||
"total_tokens": turn.Usage.TotalTokens,
|
||||
"completion_tokens_details": map[string]any{
|
||||
"reasoning_tokens": turn.Usage.ReasoningTokens,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func OpenAIResponsesUsage(turn Turn) map[string]any {
|
||||
return map[string]any{
|
||||
"input_tokens": turn.Usage.InputTokens,
|
||||
"output_tokens": turn.Usage.OutputTokens,
|
||||
"total_tokens": turn.Usage.TotalTokens,
|
||||
}
|
||||
}
|
||||
|
||||
func FinishReason(turn Turn) string {
|
||||
switch turn.StopReason {
|
||||
case StopReasonToolCalls:
|
||||
return "tool_calls"
|
||||
case StopReasonContentFilter:
|
||||
return "content_filter"
|
||||
default:
|
||||
return "stop"
|
||||
}
|
||||
}
|
||||
127
internal/assistantturn/turn_test.go
Normal file
127
internal/assistantturn/turn_test.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package assistantturn
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"ds2api/internal/promptcompat"
|
||||
"ds2api/internal/sse"
|
||||
)
|
||||
|
||||
func TestBuildTurnFromCollectedTextCitation(t *testing.T) {
|
||||
turn := BuildTurnFromCollected(sse.CollectResult{
|
||||
Text: "See [citation:1]",
|
||||
CitationLinks: map[int]string{1: "https://example.com"},
|
||||
}, BuildOptions{Model: "deepseek-v4-flash", Prompt: "prompt", SearchEnabled: true, StripReferenceMarkers: true})
|
||||
if turn.Text != "See [1](https://example.com)" {
|
||||
t.Fatalf("text mismatch: %q", turn.Text)
|
||||
}
|
||||
if turn.StopReason != StopReasonStop {
|
||||
t.Fatalf("stop reason mismatch: %q", turn.StopReason)
|
||||
}
|
||||
if turn.Error != nil {
|
||||
t.Fatalf("unexpected error: %#v", turn.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTurnFromCollectedToolCall(t *testing.T) {
|
||||
turn := BuildTurnFromCollected(sse.CollectResult{
|
||||
Text: `<tool_calls><invoke name="Write"><parameter name="content">{"x":1}</parameter></invoke></tool_calls>`,
|
||||
}, BuildOptions{
|
||||
ToolNames: []string{"Write"},
|
||||
ToolsRaw: []any{map[string]any{
|
||||
"name": "Write",
|
||||
"input_schema": map[string]any{
|
||||
"type": "object",
|
||||
"properties": map[string]any{
|
||||
"content": map[string]any{"type": "string"},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
if len(turn.ToolCalls) != 1 {
|
||||
t.Fatalf("expected one tool call, got %d", len(turn.ToolCalls))
|
||||
}
|
||||
if turn.StopReason != StopReasonToolCalls {
|
||||
t.Fatalf("stop reason mismatch: %q", turn.StopReason)
|
||||
}
|
||||
if _, ok := turn.ToolCalls[0].Input["content"].(string); !ok {
|
||||
t.Fatalf("expected content coerced to string, got %#v", turn.ToolCalls[0].Input["content"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTurnFromCollectedThinkingOnlyIsEmptyOutput(t *testing.T) {
|
||||
turn := BuildTurnFromCollected(sse.CollectResult{Thinking: "hidden"}, BuildOptions{})
|
||||
if turn.Error == nil || turn.Error.Code != "upstream_empty_output" {
|
||||
t.Fatalf("expected empty output error, got %#v", turn.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTurnFromCollectedToolChoiceRequired(t *testing.T) {
|
||||
turn := BuildTurnFromCollected(sse.CollectResult{Text: "hello"}, BuildOptions{
|
||||
ToolChoice: promptcompat.ToolChoicePolicy{Mode: promptcompat.ToolChoiceRequired},
|
||||
})
|
||||
if turn.Error == nil || turn.Error.Code != "tool_choice_violation" {
|
||||
t.Fatalf("expected tool choice violation, got %#v", turn.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTurnFromStreamSnapshotUsesVisibleTextAndRawToolDetection(t *testing.T) {
|
||||
turn := BuildTurnFromStreamSnapshot(StreamSnapshot{
|
||||
RawText: `<tool_calls><invoke name="Write"><parameter name="content">{"x":1}</parameter></invoke></tool_calls>`,
|
||||
VisibleText: "",
|
||||
}, BuildOptions{
|
||||
ToolNames: []string{"Write"},
|
||||
ToolsRaw: []any{map[string]any{
|
||||
"name": "Write",
|
||||
"schema": map[string]any{
|
||||
"type": "object",
|
||||
"properties": map[string]any{
|
||||
"content": map[string]any{"type": "string"},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
if len(turn.ToolCalls) != 1 {
|
||||
t.Fatalf("expected stream snapshot tool call, got %d", len(turn.ToolCalls))
|
||||
}
|
||||
if _, ok := turn.ToolCalls[0].Input["content"].(string); !ok {
|
||||
t.Fatalf("expected stream snapshot schema coercion, got %#v", turn.ToolCalls[0].Input["content"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTurnFromStreamSnapshotAlreadyEmittedToolAvoidsEmptyError(t *testing.T) {
|
||||
turn := BuildTurnFromStreamSnapshot(StreamSnapshot{AlreadyEmittedCalls: true}, BuildOptions{})
|
||||
if turn.Error != nil {
|
||||
t.Fatalf("unexpected empty-output error after emitted tool call: %#v", turn.Error)
|
||||
}
|
||||
if turn.StopReason != StopReasonToolCalls {
|
||||
t.Fatalf("stop reason mismatch: %q", turn.StopReason)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalizeTurnStopOutcome(t *testing.T) {
|
||||
turn := BuildTurnFromCollected(sse.CollectResult{Text: "hello"}, BuildOptions{})
|
||||
outcome := FinalizeTurn(turn, FinalizeOptions{})
|
||||
if outcome.ShouldFail {
|
||||
t.Fatalf("unexpected failure: %#v", outcome.Error)
|
||||
}
|
||||
if outcome.FinishReason != "stop" || !outcome.HasVisibleText || !outcome.HasVisibleOutput {
|
||||
t.Fatalf("unexpected outcome: %#v", outcome)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalizeTurnToolCallsOutcome(t *testing.T) {
|
||||
turn := BuildTurnFromStreamSnapshot(StreamSnapshot{AlreadyEmittedCalls: true}, BuildOptions{})
|
||||
outcome := FinalizeTurn(turn, FinalizeOptions{AlreadyEmittedToolCalls: true})
|
||||
if outcome.ShouldFail || outcome.FinishReason != "tool_calls" || !outcome.HasToolCalls {
|
||||
t.Fatalf("unexpected tool outcome: %#v", outcome)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalizeTurnContentFilterOutcome(t *testing.T) {
|
||||
turn := BuildTurnFromCollected(sse.CollectResult{ContentFilter: true}, BuildOptions{})
|
||||
outcome := FinalizeTurn(turn, FinalizeOptions{})
|
||||
if !outcome.ShouldFail || outcome.Error == nil || outcome.Error.Code != "content_filter" {
|
||||
t.Fatalf("expected content filter failure, got %#v", outcome)
|
||||
}
|
||||
}
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/google/uuid"
|
||||
|
||||
"ds2api/internal/config"
|
||||
"ds2api/internal/util"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -46,6 +47,7 @@ type Entry struct {
|
||||
Stream bool `json:"stream"`
|
||||
UserInput string `json:"user_input,omitempty"`
|
||||
Messages []Message `json:"messages,omitempty"`
|
||||
HistoryText string `json:"history_text,omitempty"`
|
||||
FinalPrompt string `json:"final_prompt,omitempty"`
|
||||
ReasoningContent string `json:"reasoning_content,omitempty"`
|
||||
Content string `json:"content,omitempty"`
|
||||
@@ -94,6 +96,7 @@ type StartParams struct {
|
||||
Stream bool
|
||||
UserInput string
|
||||
Messages []Message
|
||||
HistoryText string
|
||||
FinalPrompt string
|
||||
}
|
||||
|
||||
@@ -190,6 +193,18 @@ func (s *Store) Snapshot() (File, error) {
|
||||
return cloneFile(s.state), nil
|
||||
}
|
||||
|
||||
func (s *Store) Revision() (int64, error) {
|
||||
if s == nil {
|
||||
return 0, errors.New("chat history store is nil")
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.err != nil {
|
||||
return 0, s.err
|
||||
}
|
||||
return s.state.Revision, nil
|
||||
}
|
||||
|
||||
func (s *Store) Enabled() bool {
|
||||
if s == nil {
|
||||
return false
|
||||
@@ -218,6 +233,22 @@ func (s *Store) Get(id string) (Entry, error) {
|
||||
return cloneEntry(item), nil
|
||||
}
|
||||
|
||||
func (s *Store) DetailRevision(id string) (int64, error) {
|
||||
if s == nil {
|
||||
return 0, errors.New("chat history store is nil")
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.err != nil {
|
||||
return 0, s.err
|
||||
}
|
||||
item, ok := s.details[strings.TrimSpace(id)]
|
||||
if !ok {
|
||||
return 0, errors.New("chat history entry not found")
|
||||
}
|
||||
return item.Revision, nil
|
||||
}
|
||||
|
||||
func (s *Store) Start(params StartParams) (Entry, error) {
|
||||
if s == nil {
|
||||
return Entry{}, errors.New("chat history store is nil")
|
||||
@@ -244,6 +275,7 @@ func (s *Store) Start(params StartParams) (Entry, error) {
|
||||
Stream: params.Stream,
|
||||
UserInput: strings.TrimSpace(params.UserInput),
|
||||
Messages: cloneMessages(params.Messages),
|
||||
HistoryText: params.HistoryText,
|
||||
FinalPrompt: strings.TrimSpace(params.FinalPrompt),
|
||||
}
|
||||
s.details[entry.ID] = entry
|
||||
@@ -278,8 +310,12 @@ func (s *Store) Update(id string, params UpdateParams) (Entry, error) {
|
||||
if params.Status != "" {
|
||||
item.Status = params.Status
|
||||
}
|
||||
item.ReasoningContent = params.ReasoningContent
|
||||
item.Content = params.Content
|
||||
if params.ReasoningContent != "" || item.ReasoningContent == "" {
|
||||
item.ReasoningContent = params.ReasoningContent
|
||||
}
|
||||
if params.Content != "" || item.Content == "" {
|
||||
item.Content = params.Content
|
||||
}
|
||||
item.Error = strings.TrimSpace(params.Error)
|
||||
item.StatusCode = params.StatusCode
|
||||
item.ElapsedMs = params.ElapsedMs
|
||||
@@ -579,8 +615,8 @@ func buildPreview(item Entry) string {
|
||||
if candidate == "" {
|
||||
candidate = strings.TrimSpace(item.UserInput)
|
||||
}
|
||||
if len(candidate) > defaultPreviewAt {
|
||||
return candidate[:defaultPreviewAt] + "..."
|
||||
if truncated, ok := util.TruncateRunes(candidate, defaultPreviewAt); ok {
|
||||
return truncated + "..."
|
||||
}
|
||||
return candidate
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
func blockDetailDir(t *testing.T, detailDir string) func() {
|
||||
@@ -46,7 +47,7 @@ func TestStoreCreatesAndPersistsEntries(t *testing.T) {
|
||||
started, err := store.Start(StartParams{
|
||||
CallerID: "caller:abc",
|
||||
AccountID: "user@example.com",
|
||||
Model: "deepseek-chat",
|
||||
Model: "deepseek-v4-flash",
|
||||
Stream: true,
|
||||
UserInput: "hello",
|
||||
})
|
||||
@@ -105,6 +106,17 @@ func TestStoreCreatesAndPersistsEntries(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPreviewPreservesUTF8MB4Characters(t *testing.T) {
|
||||
long := strings.Repeat("😀", defaultPreviewAt+1)
|
||||
preview := buildPreview(Entry{Content: long})
|
||||
if !utf8.ValidString(preview) {
|
||||
t.Fatalf("expected valid utf-8 preview, got %q", preview)
|
||||
}
|
||||
if preview != strings.Repeat("😀", defaultPreviewAt)+"..." {
|
||||
t.Fatalf("unexpected preview: %q", preview)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreTrimsToConfiguredLimit(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "chat_history.json")
|
||||
store := New(path)
|
||||
@@ -113,7 +125,7 @@ func TestStoreTrimsToConfiguredLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 0; i < 12; i++ {
|
||||
entry, err := store.Start(StartParams{Model: "deepseek-chat", UserInput: "msg"})
|
||||
entry, err := store.Start(StartParams{Model: "deepseek-v4-flash", UserInput: "msg"})
|
||||
if err != nil {
|
||||
t.Fatalf("start %d failed: %v", i, err)
|
||||
}
|
||||
@@ -197,7 +209,7 @@ func TestStoreConcurrentUpdatesKeepSplitFilesValid(t *testing.T) {
|
||||
defer wg.Done()
|
||||
entry, err := store.Start(StartParams{
|
||||
CallerID: "caller:test",
|
||||
Model: "deepseek-chat",
|
||||
Model: "deepseek-v4-flash",
|
||||
UserInput: "hello",
|
||||
})
|
||||
if err != nil {
|
||||
@@ -299,7 +311,7 @@ func TestStoreAutoMigratesMetadataOnlyLegacyMonolith(t *testing.T) {
|
||||
Status: "error",
|
||||
CallerID: "caller:test",
|
||||
AccountID: "acct:test",
|
||||
Model: "deepseek-chat",
|
||||
Model: "deepseek-v4-flash",
|
||||
Stream: true,
|
||||
UserInput: "hello",
|
||||
Error: "boom",
|
||||
@@ -481,3 +493,112 @@ func TestStoreWritesOnlyChangedDetailFiles(t *testing.T) {
|
||||
t.Fatalf("expected untouched detail file to remain byte-identical")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdatePreservesContentWhenNewContentIsEmpty(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "chat_history.json")
|
||||
store := New(path)
|
||||
|
||||
started, err := store.Start(StartParams{
|
||||
CallerID: "caller:abc",
|
||||
Model: "deepseek-v4-flash",
|
||||
Stream: true,
|
||||
UserInput: "hello",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("start entry failed: %v", err)
|
||||
}
|
||||
|
||||
if _, err := store.Update(started.ID, UpdateParams{
|
||||
Status: "streaming",
|
||||
ReasoningContent: "let me think",
|
||||
Content: "I'll help you with that.",
|
||||
}); err != nil {
|
||||
t.Fatalf("progress update failed: %v", err)
|
||||
}
|
||||
|
||||
updated, err := store.Update(started.ID, UpdateParams{
|
||||
Status: "success",
|
||||
Content: "",
|
||||
Completed: true,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("success update failed: %v", err)
|
||||
}
|
||||
|
||||
if updated.Content != "I'll help you with that." {
|
||||
t.Fatalf("expected content to be preserved, got %q", updated.Content)
|
||||
}
|
||||
if updated.ReasoningContent != "let me think" {
|
||||
t.Fatalf("expected reasoning content to be preserved, got %q", updated.ReasoningContent)
|
||||
}
|
||||
|
||||
full, err := store.Get(started.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("get entry failed: %v", err)
|
||||
}
|
||||
if full.Content != "I'll help you with that." {
|
||||
t.Fatalf("expected persisted content to be preserved, got %q", full.Content)
|
||||
}
|
||||
if full.ReasoningContent != "let me think" {
|
||||
t.Fatalf("expected persisted reasoning content to be preserved, got %q", full.ReasoningContent)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateAllowsSettingContentFromEmpty(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "chat_history.json")
|
||||
store := New(path)
|
||||
|
||||
started, err := store.Start(StartParams{
|
||||
CallerID: "caller:abc",
|
||||
Model: "deepseek-v4-flash",
|
||||
Stream: true,
|
||||
UserInput: "hello",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("start entry failed: %v", err)
|
||||
}
|
||||
|
||||
updated, err := store.Update(started.ID, UpdateParams{
|
||||
Status: "success",
|
||||
Content: "final answer",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("update failed: %v", err)
|
||||
}
|
||||
if updated.Content != "final answer" {
|
||||
t.Fatalf("expected content to be set, got %q", updated.Content)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateAllowsOverwritingContentWithNewValue(t *testing.T) {
|
||||
path := filepath.Join(t.TempDir(), "chat_history.json")
|
||||
store := New(path)
|
||||
|
||||
started, err := store.Start(StartParams{
|
||||
CallerID: "caller:abc",
|
||||
Model: "deepseek-v4-flash",
|
||||
Stream: true,
|
||||
UserInput: "hello",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("start entry failed: %v", err)
|
||||
}
|
||||
|
||||
if _, err := store.Update(started.ID, UpdateParams{
|
||||
Status: "streaming",
|
||||
Content: "partial",
|
||||
}); err != nil {
|
||||
t.Fatalf("first update failed: %v", err)
|
||||
}
|
||||
|
||||
updated, err := store.Update(started.ID, UpdateParams{
|
||||
Status: "success",
|
||||
Content: "final answer",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("second update failed: %v", err)
|
||||
}
|
||||
if updated.Content != "final answer" {
|
||||
t.Fatalf("expected content to be overwritten, got %q", updated.Content)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,32 +1,21 @@
|
||||
package claudeconv
|
||||
|
||||
import "strings"
|
||||
import (
|
||||
"strings"
|
||||
|
||||
type ClaudeMappingProvider interface {
|
||||
ClaudeMapping() map[string]string
|
||||
}
|
||||
"ds2api/internal/config"
|
||||
)
|
||||
|
||||
func ConvertClaudeToDeepSeek(claudeReq map[string]any, mappingProvider ClaudeMappingProvider, defaultClaudeModel string) map[string]any {
|
||||
func ConvertClaudeToDeepSeek(claudeReq map[string]any, aliasProvider config.ModelAliasReader, defaultClaudeModel string) map[string]any {
|
||||
messages, _ := claudeReq["messages"].([]any)
|
||||
model, _ := claudeReq["model"].(string)
|
||||
if model == "" {
|
||||
model = defaultClaudeModel
|
||||
}
|
||||
|
||||
mapping := map[string]string{}
|
||||
if mappingProvider != nil {
|
||||
mapping = mappingProvider.ClaudeMapping()
|
||||
}
|
||||
dsModel := mapping["fast"]
|
||||
if dsModel == "" {
|
||||
dsModel = "deepseek-chat"
|
||||
}
|
||||
|
||||
modelLower := strings.ToLower(model)
|
||||
if strings.Contains(modelLower, "opus") || strings.Contains(modelLower, "reasoner") || strings.Contains(modelLower, "slow") {
|
||||
if slow := mapping["slow"]; slow != "" {
|
||||
dsModel = slow
|
||||
}
|
||||
dsModel, ok := config.ResolveModel(aliasProvider, model)
|
||||
if !ok || strings.TrimSpace(dsModel) == "" {
|
||||
dsModel = "deepseek-v4-flash"
|
||||
}
|
||||
|
||||
convertedMessages := make([]any, 0, len(messages)+1)
|
||||
|
||||
193
internal/completionruntime/nonstream.go
Normal file
193
internal/completionruntime/nonstream.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package completionruntime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"ds2api/internal/assistantturn"
|
||||
"ds2api/internal/auth"
|
||||
"ds2api/internal/config"
|
||||
dsclient "ds2api/internal/deepseek/client"
|
||||
"ds2api/internal/httpapi/openai/history"
|
||||
"ds2api/internal/httpapi/openai/shared"
|
||||
"ds2api/internal/promptcompat"
|
||||
"ds2api/internal/sse"
|
||||
)
|
||||
|
||||
type DeepSeekCaller interface {
|
||||
CreateSession(ctx context.Context, a *auth.RequestAuth, maxAttempts int) (string, error)
|
||||
GetPow(ctx context.Context, a *auth.RequestAuth, maxAttempts int) (string, error)
|
||||
UploadFile(ctx context.Context, a *auth.RequestAuth, req dsclient.UploadFileRequest, maxAttempts int) (*dsclient.UploadFileResult, error)
|
||||
CallCompletion(ctx context.Context, a *auth.RequestAuth, payload map[string]any, powResp string, maxAttempts int) (*http.Response, error)
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
StripReferenceMarkers bool
|
||||
MaxAttempts int
|
||||
RetryEnabled bool
|
||||
RetryMaxAttempts int
|
||||
CurrentInputFile history.CurrentInputConfigReader
|
||||
}
|
||||
|
||||
type NonStreamResult struct {
|
||||
SessionID string
|
||||
Payload map[string]any
|
||||
Turn assistantturn.Turn
|
||||
Attempts int
|
||||
}
|
||||
|
||||
type StartResult struct {
|
||||
SessionID string
|
||||
Payload map[string]any
|
||||
Pow string
|
||||
Response *http.Response
|
||||
Request promptcompat.StandardRequest
|
||||
}
|
||||
|
||||
func StartCompletion(ctx context.Context, ds DeepSeekCaller, a *auth.RequestAuth, stdReq promptcompat.StandardRequest, opts Options) (StartResult, *assistantturn.OutputError) {
|
||||
maxAttempts := opts.MaxAttempts
|
||||
if maxAttempts <= 0 {
|
||||
maxAttempts = 3
|
||||
}
|
||||
var prepErr *assistantturn.OutputError
|
||||
stdReq, prepErr = prepareCurrentInputFile(ctx, ds, a, stdReq, opts)
|
||||
if prepErr != nil {
|
||||
return StartResult{Request: stdReq}, prepErr
|
||||
}
|
||||
sessionID, err := ds.CreateSession(ctx, a, maxAttempts)
|
||||
if err != nil {
|
||||
return StartResult{Request: stdReq}, authOutputError(a)
|
||||
}
|
||||
pow, err := ds.GetPow(ctx, a, maxAttempts)
|
||||
if err != nil {
|
||||
return StartResult{SessionID: sessionID, Request: stdReq}, &assistantturn.OutputError{Status: http.StatusUnauthorized, Message: "Failed to get PoW (invalid token or unknown error).", Code: "error"}
|
||||
}
|
||||
payload := stdReq.CompletionPayload(sessionID)
|
||||
resp, err := ds.CallCompletion(ctx, a, payload, pow, maxAttempts)
|
||||
if err != nil {
|
||||
return StartResult{SessionID: sessionID, Payload: payload, Pow: pow, Request: stdReq}, &assistantturn.OutputError{Status: http.StatusInternalServerError, Message: "Failed to get completion.", Code: "error"}
|
||||
}
|
||||
return StartResult{SessionID: sessionID, Payload: payload, Pow: pow, Response: resp, Request: stdReq}, nil
|
||||
}
|
||||
|
||||
func prepareCurrentInputFile(ctx context.Context, ds DeepSeekCaller, a *auth.RequestAuth, stdReq promptcompat.StandardRequest, opts Options) (promptcompat.StandardRequest, *assistantturn.OutputError) {
|
||||
if opts.CurrentInputFile == nil || stdReq.CurrentInputFileApplied {
|
||||
return stdReq, nil
|
||||
}
|
||||
out, err := (history.Service{Store: opts.CurrentInputFile, DS: ds}).ApplyCurrentInputFile(ctx, a, stdReq)
|
||||
if err != nil {
|
||||
status, message := history.MapError(err)
|
||||
return out, &assistantturn.OutputError{Status: status, Message: message, Code: "error"}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func ExecuteNonStreamWithRetry(ctx context.Context, ds DeepSeekCaller, a *auth.RequestAuth, stdReq promptcompat.StandardRequest, opts Options) (NonStreamResult, *assistantturn.OutputError) {
|
||||
start, startErr := StartCompletion(ctx, ds, a, stdReq, opts)
|
||||
if startErr != nil {
|
||||
return NonStreamResult{SessionID: start.SessionID, Payload: start.Payload}, startErr
|
||||
}
|
||||
stdReq = start.Request
|
||||
maxAttempts := opts.MaxAttempts
|
||||
if maxAttempts <= 0 {
|
||||
maxAttempts = 3
|
||||
}
|
||||
sessionID := start.SessionID
|
||||
payload := start.Payload
|
||||
pow := start.Pow
|
||||
|
||||
attempts := 0
|
||||
currentResp := start.Response
|
||||
usagePrompt := stdReq.PromptTokenText
|
||||
accumulatedThinking := ""
|
||||
accumulatedRawThinking := ""
|
||||
accumulatedToolDetectionThinking := ""
|
||||
for {
|
||||
turn, outErr := collectAttempt(currentResp, stdReq, usagePrompt, opts)
|
||||
if outErr != nil {
|
||||
return NonStreamResult{SessionID: sessionID, Payload: payload, Attempts: attempts}, outErr
|
||||
}
|
||||
accumulatedThinking += sse.TrimContinuationOverlap(accumulatedThinking, turn.Thinking)
|
||||
accumulatedRawThinking += sse.TrimContinuationOverlap(accumulatedRawThinking, turn.RawThinking)
|
||||
accumulatedToolDetectionThinking += sse.TrimContinuationOverlap(accumulatedToolDetectionThinking, turn.DetectionThinking)
|
||||
turn.Thinking = accumulatedThinking
|
||||
turn.RawThinking = accumulatedRawThinking
|
||||
turn.DetectionThinking = accumulatedToolDetectionThinking
|
||||
turn = assistantturn.BuildTurnFromCollected(sse.CollectResult{
|
||||
Text: turn.RawText,
|
||||
Thinking: turn.RawThinking,
|
||||
ToolDetectionThinking: turn.DetectionThinking,
|
||||
ContentFilter: turn.ContentFilter,
|
||||
CitationLinks: turn.CitationLinks,
|
||||
ResponseMessageID: turn.ResponseMessageID,
|
||||
}, buildOptions(stdReq, usagePrompt, opts))
|
||||
|
||||
retryMax := opts.RetryMaxAttempts
|
||||
if retryMax <= 0 {
|
||||
retryMax = shared.EmptyOutputRetryMaxAttempts()
|
||||
}
|
||||
if !opts.RetryEnabled || !assistantturn.ShouldRetryEmptyOutput(turn, attempts, retryMax) {
|
||||
return NonStreamResult{SessionID: sessionID, Payload: payload, Turn: turn, Attempts: attempts}, turn.Error
|
||||
}
|
||||
|
||||
attempts++
|
||||
config.Logger.Info("[completion_runtime_empty_retry] attempting synthetic retry", "surface", stdReq.Surface, "stream", false, "retry_attempt", attempts, "parent_message_id", turn.ResponseMessageID)
|
||||
retryPow, powErr := ds.GetPow(ctx, a, maxAttempts)
|
||||
if powErr != nil {
|
||||
config.Logger.Warn("[completion_runtime_empty_retry] retry PoW fetch failed, falling back to original PoW", "surface", stdReq.Surface, "retry_attempt", attempts, "error", powErr)
|
||||
retryPow = pow
|
||||
}
|
||||
retryPayload := shared.ClonePayloadForEmptyOutputRetry(payload, turn.ResponseMessageID)
|
||||
nextResp, err := ds.CallCompletion(ctx, a, retryPayload, retryPow, maxAttempts)
|
||||
if err != nil {
|
||||
return NonStreamResult{SessionID: sessionID, Payload: payload, Turn: turn, Attempts: attempts}, &assistantturn.OutputError{Status: http.StatusInternalServerError, Message: "Failed to get completion.", Code: "error"}
|
||||
}
|
||||
usagePrompt = shared.UsagePromptWithEmptyOutputRetry(usagePrompt, attempts)
|
||||
currentResp = nextResp
|
||||
}
|
||||
}
|
||||
|
||||
func collectAttempt(resp *http.Response, stdReq promptcompat.StandardRequest, usagePrompt string, opts Options) (assistantturn.Turn, *assistantturn.OutputError) {
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
config.Logger.Warn("[completion_runtime] response body close failed", "surface", stdReq.Surface, "error", err)
|
||||
}
|
||||
}()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
message := strings.TrimSpace(string(body))
|
||||
if message == "" {
|
||||
message = http.StatusText(resp.StatusCode)
|
||||
}
|
||||
return assistantturn.Turn{}, &assistantturn.OutputError{Status: resp.StatusCode, Message: message, Code: "error"}
|
||||
}
|
||||
result := sse.CollectStream(resp, stdReq.Thinking, false)
|
||||
return assistantturn.BuildTurnFromCollected(result, buildOptions(stdReq, usagePrompt, opts)), nil
|
||||
}
|
||||
|
||||
func buildOptions(stdReq promptcompat.StandardRequest, prompt string, opts Options) assistantturn.BuildOptions {
|
||||
return assistantturn.BuildOptions{
|
||||
Model: stdReq.ResponseModel,
|
||||
Prompt: prompt,
|
||||
RefFileTokens: stdReq.RefFileTokens,
|
||||
SearchEnabled: stdReq.Search,
|
||||
StripReferenceMarkers: opts.StripReferenceMarkers,
|
||||
ToolNames: stdReq.ToolNames,
|
||||
ToolsRaw: stdReq.ToolsRaw,
|
||||
ToolChoice: stdReq.ToolChoice,
|
||||
}
|
||||
}
|
||||
|
||||
func authOutputError(a *auth.RequestAuth) *assistantturn.OutputError {
|
||||
if a != nil && a.UseConfigToken {
|
||||
return &assistantturn.OutputError{Status: http.StatusUnauthorized, Message: "Account token is invalid. Please re-login the account in admin.", Code: "error"}
|
||||
}
|
||||
return &assistantturn.OutputError{Status: http.StatusUnauthorized, Message: "Invalid token. If this should be a DS2API key, add it to config.keys first.", Code: "error"}
|
||||
}
|
||||
|
||||
func Errorf(status int, format string, args ...any) *assistantturn.OutputError {
|
||||
return &assistantturn.OutputError{Status: status, Message: fmt.Sprintf(format, args...), Code: "error"}
|
||||
}
|
||||
174
internal/completionruntime/nonstream_test.go
Normal file
174
internal/completionruntime/nonstream_test.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package completionruntime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"ds2api/internal/auth"
|
||||
dsclient "ds2api/internal/deepseek/client"
|
||||
"ds2api/internal/promptcompat"
|
||||
)
|
||||
|
||||
type fakeDeepSeekCaller struct {
|
||||
responses []*http.Response
|
||||
payloads []map[string]any
|
||||
uploads []dsclient.UploadFileRequest
|
||||
}
|
||||
|
||||
type currentInputRuntimeConfig struct{}
|
||||
|
||||
func (currentInputRuntimeConfig) CurrentInputFileEnabled() bool { return true }
|
||||
func (currentInputRuntimeConfig) CurrentInputFileMinChars() int { return 0 }
|
||||
|
||||
func (f *fakeDeepSeekCaller) CreateSession(context.Context, *auth.RequestAuth, int) (string, error) {
|
||||
return "session-1", nil
|
||||
}
|
||||
|
||||
func (f *fakeDeepSeekCaller) GetPow(context.Context, *auth.RequestAuth, int) (string, error) {
|
||||
return "pow", nil
|
||||
}
|
||||
|
||||
func (f *fakeDeepSeekCaller) UploadFile(_ context.Context, _ *auth.RequestAuth, req dsclient.UploadFileRequest, _ int) (*dsclient.UploadFileResult, error) {
|
||||
f.uploads = append(f.uploads, req)
|
||||
return &dsclient.UploadFileResult{ID: "file-runtime-1"}, nil
|
||||
}
|
||||
|
||||
func (f *fakeDeepSeekCaller) CallCompletion(_ context.Context, _ *auth.RequestAuth, payload map[string]any, _ string, _ int) (*http.Response, error) {
|
||||
f.payloads = append(f.payloads, payload)
|
||||
if len(f.responses) == 0 {
|
||||
return sseHTTPResponse(http.StatusOK, `data: {"p":"response/content","v":"fallback"}`), nil
|
||||
}
|
||||
resp := f.responses[0]
|
||||
f.responses = f.responses[1:]
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func TestExecuteNonStreamWithRetryBuildsCanonicalTurn(t *testing.T) {
|
||||
ds := &fakeDeepSeekCaller{responses: []*http.Response{sseHTTPResponse(
|
||||
http.StatusOK,
|
||||
`data: {"response_message_id":42,"p":"response/content","v":"<tool_calls><invoke name=\"Write\"><parameter name=\"content\">{\"x\":1}</parameter></invoke></tool_calls>"}`,
|
||||
)}}
|
||||
stdReq := promptcompat.StandardRequest{
|
||||
Surface: "test",
|
||||
ResponseModel: "deepseek-v4-flash",
|
||||
PromptTokenText: "prompt",
|
||||
FinalPrompt: "final prompt",
|
||||
ToolNames: []string{"Write"},
|
||||
ToolsRaw: []any{map[string]any{
|
||||
"name": "Write",
|
||||
"input_schema": map[string]any{
|
||||
"type": "object",
|
||||
"properties": map[string]any{
|
||||
"content": map[string]any{"type": "string"},
|
||||
},
|
||||
},
|
||||
}},
|
||||
}
|
||||
|
||||
result, outErr := ExecuteNonStreamWithRetry(context.Background(), ds, &auth.RequestAuth{}, stdReq, Options{})
|
||||
if outErr != nil {
|
||||
t.Fatalf("unexpected output error: %#v", outErr)
|
||||
}
|
||||
if result.SessionID != "session-1" {
|
||||
t.Fatalf("session mismatch: %q", result.SessionID)
|
||||
}
|
||||
if got := result.Turn.ResponseMessageID; got != 42 {
|
||||
t.Fatalf("response message id mismatch: %d", got)
|
||||
}
|
||||
if len(result.Turn.ToolCalls) != 1 {
|
||||
t.Fatalf("expected one tool call, got %d", len(result.Turn.ToolCalls))
|
||||
}
|
||||
if _, ok := result.Turn.ToolCalls[0].Input["content"].(string); !ok {
|
||||
t.Fatalf("expected schema-normalized string argument, got %#v", result.Turn.ToolCalls[0].Input["content"])
|
||||
}
|
||||
if result.Turn.Usage.InputTokens == 0 || result.Turn.Usage.TotalTokens == 0 {
|
||||
t.Fatalf("expected usage to be populated, got %#v", result.Turn.Usage)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecuteNonStreamWithRetryUsesParentMessageForEmptyRetry(t *testing.T) {
|
||||
ds := &fakeDeepSeekCaller{responses: []*http.Response{
|
||||
sseHTTPResponse(http.StatusOK, `data: {"response_message_id":77,"p":"response/status","v":"FINISHED"}`),
|
||||
sseHTTPResponse(http.StatusOK, `data: {"response_message_id":78,"p":"response/content","v":"ok"}`),
|
||||
}}
|
||||
stdReq := promptcompat.StandardRequest{
|
||||
Surface: "test",
|
||||
ResponseModel: "deepseek-v4-flash",
|
||||
PromptTokenText: "prompt",
|
||||
FinalPrompt: "final prompt",
|
||||
}
|
||||
|
||||
result, outErr := ExecuteNonStreamWithRetry(context.Background(), ds, &auth.RequestAuth{}, stdReq, Options{RetryEnabled: true})
|
||||
if outErr != nil {
|
||||
t.Fatalf("unexpected output error: %#v", outErr)
|
||||
}
|
||||
if result.Attempts != 1 {
|
||||
t.Fatalf("expected one retry, got %d", result.Attempts)
|
||||
}
|
||||
if len(ds.payloads) != 2 {
|
||||
t.Fatalf("expected two completion calls, got %d", len(ds.payloads))
|
||||
}
|
||||
if got := ds.payloads[1]["parent_message_id"]; got != 77 {
|
||||
t.Fatalf("retry parent_message_id mismatch: %#v", got)
|
||||
}
|
||||
if result.Turn.Text != "ok" {
|
||||
t.Fatalf("retry text mismatch: %q", result.Turn.Text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartCompletionAppliesCurrentInputFileGlobally(t *testing.T) {
|
||||
ds := &fakeDeepSeekCaller{responses: []*http.Response{sseHTTPResponse(http.StatusOK, `data: {"p":"response/content","v":"ok"}`)}}
|
||||
stdReq := promptcompat.StandardRequest{
|
||||
Surface: "test_adapter",
|
||||
RequestedModel: "deepseek-v4-flash",
|
||||
ResolvedModel: "deepseek-v4-flash",
|
||||
ResponseModel: "deepseek-v4-flash",
|
||||
PromptTokenText: "first user turn",
|
||||
FinalPrompt: "first user turn",
|
||||
Messages: []any{
|
||||
map[string]any{"role": "user", "content": "first user turn"},
|
||||
},
|
||||
}
|
||||
|
||||
start, outErr := StartCompletion(context.Background(), ds, &auth.RequestAuth{DeepSeekToken: "token"}, stdReq, Options{
|
||||
CurrentInputFile: currentInputRuntimeConfig{},
|
||||
})
|
||||
if outErr != nil {
|
||||
t.Fatalf("unexpected output error: %#v", outErr)
|
||||
}
|
||||
if len(ds.uploads) != 1 {
|
||||
t.Fatalf("expected current input upload, got %d", len(ds.uploads))
|
||||
}
|
||||
if got := ds.uploads[0].Filename; got != "DS2API_HISTORY.txt" {
|
||||
t.Fatalf("upload filename=%q want DS2API_HISTORY.txt", got)
|
||||
}
|
||||
if len(ds.payloads) != 1 {
|
||||
t.Fatalf("expected one completion payload, got %d", len(ds.payloads))
|
||||
}
|
||||
refIDs, _ := ds.payloads[0]["ref_file_ids"].([]any)
|
||||
if len(refIDs) != 1 || refIDs[0] != "file-runtime-1" {
|
||||
t.Fatalf("expected uploaded file id in ref_file_ids, got %#v", ds.payloads[0]["ref_file_ids"])
|
||||
}
|
||||
prompt, _ := ds.payloads[0]["prompt"].(string)
|
||||
if !strings.Contains(prompt, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") {
|
||||
t.Fatalf("expected continuation prompt, got %q", prompt)
|
||||
}
|
||||
if !start.Request.CurrentInputFileApplied || !strings.Contains(start.Request.PromptTokenText, "# DS2API_HISTORY.txt") {
|
||||
t.Fatalf("expected prepared request to carry current input file state, got %#v", start.Request)
|
||||
}
|
||||
}
|
||||
|
||||
func sseHTTPResponse(status int, lines ...string) *http.Response {
|
||||
body := strings.Join(lines, "\n")
|
||||
if !strings.HasSuffix(body, "\n") {
|
||||
body += "\n"
|
||||
}
|
||||
return &http.Response{
|
||||
StatusCode: status,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader(body)),
|
||||
}
|
||||
}
|
||||
@@ -26,12 +26,6 @@ func (c Config) MarshalJSON() ([]byte, error) {
|
||||
if len(c.Proxies) > 0 {
|
||||
m["proxies"] = c.Proxies
|
||||
}
|
||||
if len(c.ClaudeMapping) > 0 {
|
||||
m["claude_mapping"] = c.ClaudeMapping
|
||||
}
|
||||
if len(c.ClaudeModelMap) > 0 {
|
||||
m["claude_model_mapping"] = c.ClaudeModelMap
|
||||
}
|
||||
if len(c.ModelAliases) > 0 {
|
||||
m["model_aliases"] = c.ModelAliases
|
||||
}
|
||||
@@ -41,9 +35,6 @@ func (c Config) MarshalJSON() ([]byte, error) {
|
||||
if c.Runtime.AccountMaxInflight > 0 || c.Runtime.AccountMaxQueue > 0 || c.Runtime.GlobalMaxInflight > 0 || c.Runtime.TokenRefreshIntervalHours > 0 {
|
||||
m["runtime"] = c.Runtime
|
||||
}
|
||||
if c.Compat.WideInputStrictOutput != nil || c.Compat.StripReferenceMarkers != nil {
|
||||
m["compat"] = c.Compat
|
||||
}
|
||||
if c.Responses.StoreTTLSeconds > 0 {
|
||||
m["responses"] = c.Responses
|
||||
}
|
||||
@@ -51,8 +42,11 @@ func (c Config) MarshalJSON() ([]byte, error) {
|
||||
m["embeddings"] = c.Embeddings
|
||||
}
|
||||
m["auto_delete"] = c.AutoDelete
|
||||
if c.HistorySplit.Enabled != nil || c.HistorySplit.TriggerAfterTurns != nil {
|
||||
m["history_split"] = c.HistorySplit
|
||||
if c.CurrentInputFile.Enabled != nil || c.CurrentInputFile.MinChars != 0 {
|
||||
m["current_input_file"] = c.CurrentInputFile
|
||||
}
|
||||
if c.ThinkingInjection.Enabled != nil || strings.TrimSpace(c.ThinkingInjection.Prompt) != "" {
|
||||
m["thinking_injection"] = c.ThinkingInjection
|
||||
}
|
||||
if c.VercelSyncHash != "" {
|
||||
m["_vercel_sync_hash"] = c.VercelSyncHash
|
||||
@@ -88,13 +82,8 @@ func (c *Config) UnmarshalJSON(b []byte) error {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
}
|
||||
case "claude_mapping":
|
||||
if err := json.Unmarshal(v, &c.ClaudeMapping); err != nil {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
}
|
||||
case "claude_model_mapping":
|
||||
if err := json.Unmarshal(v, &c.ClaudeModelMap); err != nil {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
}
|
||||
// Removed legacy mapping fields are ignored instead of persisted.
|
||||
case "model_aliases":
|
||||
if err := json.Unmarshal(v, &c.ModelAliases); err != nil {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
@@ -108,8 +97,9 @@ func (c *Config) UnmarshalJSON(b []byte) error {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
}
|
||||
case "compat":
|
||||
if err := json.Unmarshal(v, &c.Compat); err != nil {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
// Removed field ignored instead of persisted.
|
||||
if Logger != nil {
|
||||
Logger.Warn("config key \"compat\" is deprecated and ignored; remove it from your configuration")
|
||||
}
|
||||
case "toolcall":
|
||||
// Legacy field ignored. Toolcall policy is fixed and no longer configurable.
|
||||
@@ -126,7 +116,13 @@ func (c *Config) UnmarshalJSON(b []byte) error {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
}
|
||||
case "history_split":
|
||||
if err := json.Unmarshal(v, &c.HistorySplit); err != nil {
|
||||
// Removed legacy split field is ignored instead of persisted.
|
||||
case "current_input_file":
|
||||
if err := json.Unmarshal(v, &c.CurrentInputFile); err != nil {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
}
|
||||
case "thinking_injection":
|
||||
if err := json.Unmarshal(v, &c.ThinkingInjection); err != nil {
|
||||
return fmt.Errorf("invalid field %q: %w", k, err)
|
||||
}
|
||||
case "_vercel_sync_hash":
|
||||
@@ -150,25 +146,23 @@ func (c *Config) UnmarshalJSON(b []byte) error {
|
||||
|
||||
func (c Config) Clone() Config {
|
||||
clone := Config{
|
||||
Keys: slices.Clone(c.Keys),
|
||||
APIKeys: slices.Clone(c.APIKeys),
|
||||
Accounts: slices.Clone(c.Accounts),
|
||||
Proxies: slices.Clone(c.Proxies),
|
||||
ClaudeMapping: cloneStringMap(c.ClaudeMapping),
|
||||
ClaudeModelMap: cloneStringMap(c.ClaudeModelMap),
|
||||
ModelAliases: cloneStringMap(c.ModelAliases),
|
||||
Admin: c.Admin,
|
||||
Runtime: c.Runtime,
|
||||
Compat: CompatConfig{
|
||||
WideInputStrictOutput: cloneBoolPtr(c.Compat.WideInputStrictOutput),
|
||||
StripReferenceMarkers: cloneBoolPtr(c.Compat.StripReferenceMarkers),
|
||||
Keys: slices.Clone(c.Keys),
|
||||
APIKeys: slices.Clone(c.APIKeys),
|
||||
Accounts: slices.Clone(c.Accounts),
|
||||
Proxies: slices.Clone(c.Proxies),
|
||||
ModelAliases: cloneStringMap(c.ModelAliases),
|
||||
Admin: c.Admin,
|
||||
Runtime: c.Runtime,
|
||||
Responses: c.Responses,
|
||||
Embeddings: c.Embeddings,
|
||||
AutoDelete: c.AutoDelete,
|
||||
CurrentInputFile: CurrentInputFileConfig{
|
||||
Enabled: cloneBoolPtr(c.CurrentInputFile.Enabled),
|
||||
MinChars: c.CurrentInputFile.MinChars,
|
||||
},
|
||||
Responses: c.Responses,
|
||||
Embeddings: c.Embeddings,
|
||||
AutoDelete: c.AutoDelete,
|
||||
HistorySplit: HistorySplitConfig{
|
||||
Enabled: cloneBoolPtr(c.HistorySplit.Enabled),
|
||||
TriggerAfterTurns: cloneIntPtr(c.HistorySplit.TriggerAfterTurns),
|
||||
ThinkingInjection: ThinkingInjectionConfig{
|
||||
Enabled: cloneBoolPtr(c.ThinkingInjection.Enabled),
|
||||
Prompt: c.ThinkingInjection.Prompt,
|
||||
},
|
||||
VercelSyncHash: c.VercelSyncHash,
|
||||
VercelSyncTime: c.VercelSyncTime,
|
||||
@@ -199,14 +193,6 @@ func cloneBoolPtr(in *bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
func cloneIntPtr(in *int) *int {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
v := *in
|
||||
return &v
|
||||
}
|
||||
|
||||
func parseConfigString(raw string) (Config, error) {
|
||||
var cfg Config
|
||||
candidates := []string{raw}
|
||||
|
||||
@@ -8,23 +8,21 @@ import (
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Keys []string `json:"keys,omitempty"`
|
||||
APIKeys []APIKey `json:"api_keys,omitempty"`
|
||||
Accounts []Account `json:"accounts,omitempty"`
|
||||
Proxies []Proxy `json:"proxies,omitempty"`
|
||||
ClaudeMapping map[string]string `json:"claude_mapping,omitempty"`
|
||||
ClaudeModelMap map[string]string `json:"claude_model_mapping,omitempty"`
|
||||
ModelAliases map[string]string `json:"model_aliases,omitempty"`
|
||||
Admin AdminConfig `json:"admin,omitempty"`
|
||||
Runtime RuntimeConfig `json:"runtime,omitempty"`
|
||||
Compat CompatConfig `json:"compat,omitempty"`
|
||||
Responses ResponsesConfig `json:"responses,omitempty"`
|
||||
Embeddings EmbeddingsConfig `json:"embeddings,omitempty"`
|
||||
AutoDelete AutoDeleteConfig `json:"auto_delete"`
|
||||
HistorySplit HistorySplitConfig `json:"history_split"`
|
||||
VercelSyncHash string `json:"_vercel_sync_hash,omitempty"`
|
||||
VercelSyncTime int64 `json:"_vercel_sync_time,omitempty"`
|
||||
AdditionalFields map[string]any `json:"-"`
|
||||
Keys []string `json:"keys,omitempty"`
|
||||
APIKeys []APIKey `json:"api_keys,omitempty"`
|
||||
Accounts []Account `json:"accounts,omitempty"`
|
||||
Proxies []Proxy `json:"proxies,omitempty"`
|
||||
ModelAliases map[string]string `json:"model_aliases,omitempty"`
|
||||
Admin AdminConfig `json:"admin,omitempty"`
|
||||
Runtime RuntimeConfig `json:"runtime,omitempty"`
|
||||
Responses ResponsesConfig `json:"responses,omitempty"`
|
||||
Embeddings EmbeddingsConfig `json:"embeddings,omitempty"`
|
||||
AutoDelete AutoDeleteConfig `json:"auto_delete"`
|
||||
CurrentInputFile CurrentInputFileConfig `json:"current_input_file,omitempty"`
|
||||
ThinkingInjection ThinkingInjectionConfig `json:"thinking_injection,omitempty"`
|
||||
VercelSyncHash string `json:"_vercel_sync_hash,omitempty"`
|
||||
VercelSyncTime int64 `json:"_vercel_sync_time,omitempty"`
|
||||
AdditionalFields map[string]any `json:"-"`
|
||||
}
|
||||
|
||||
type Account struct {
|
||||
@@ -100,6 +98,8 @@ func (c *Config) NormalizeCredentials() {
|
||||
c.Accounts[i].Name = strings.TrimSpace(c.Accounts[i].Name)
|
||||
c.Accounts[i].Remark = strings.TrimSpace(c.Accounts[i].Remark)
|
||||
}
|
||||
|
||||
c.normalizeModelAliases()
|
||||
}
|
||||
|
||||
// DropInvalidAccounts removes accounts that cannot be addressed by admin APIs
|
||||
@@ -119,9 +119,25 @@ func (c *Config) DropInvalidAccounts() {
|
||||
c.Accounts = kept
|
||||
}
|
||||
|
||||
type CompatConfig struct {
|
||||
WideInputStrictOutput *bool `json:"wide_input_strict_output,omitempty"`
|
||||
StripReferenceMarkers *bool `json:"strip_reference_markers,omitempty"`
|
||||
func (c *Config) normalizeModelAliases() {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
|
||||
aliases := map[string]string{}
|
||||
for k, v := range c.ModelAliases {
|
||||
key := strings.TrimSpace(lower(k))
|
||||
val := strings.TrimSpace(lower(v))
|
||||
if key == "" || val == "" {
|
||||
continue
|
||||
}
|
||||
aliases[key] = val
|
||||
}
|
||||
if len(aliases) == 0 {
|
||||
c.ModelAliases = nil
|
||||
} else {
|
||||
c.ModelAliases = aliases
|
||||
}
|
||||
}
|
||||
|
||||
type AdminConfig struct {
|
||||
@@ -150,7 +166,12 @@ type AutoDeleteConfig struct {
|
||||
Sessions bool `json:"sessions,omitempty"`
|
||||
}
|
||||
|
||||
type HistorySplitConfig struct {
|
||||
Enabled *bool `json:"enabled,omitempty"`
|
||||
TriggerAfterTurns *int `json:"trigger_after_turns,omitempty"`
|
||||
type CurrentInputFileConfig struct {
|
||||
Enabled *bool `json:"enabled,omitempty"`
|
||||
MinChars int `json:"min_chars,omitempty"`
|
||||
}
|
||||
|
||||
type ThinkingInjectionConfig struct {
|
||||
Enabled *bool `json:"enabled,omitempty"`
|
||||
Prompt string `json:"prompt,omitempty"`
|
||||
}
|
||||
|
||||
@@ -10,19 +10,29 @@ import (
|
||||
// ─── GetModelConfig edge cases ───────────────────────────────────────
|
||||
|
||||
func TestGetModelConfigDeepSeekChat(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-chat")
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-flash")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-chat")
|
||||
t.Fatal("expected ok for deepseek-v4-flash")
|
||||
}
|
||||
if !thinking || search {
|
||||
t.Fatalf("expected thinking=true search=false for deepseek-v4-flash, got thinking=%v search=%v", thinking, search)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekChatNoThinking(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-flash-nothinking")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-v4-flash-nothinking")
|
||||
}
|
||||
if thinking || search {
|
||||
t.Fatalf("expected no thinking/search for deepseek-chat, got thinking=%v search=%v", thinking, search)
|
||||
t.Fatalf("expected thinking=false search=false for deepseek-v4-flash-nothinking, got thinking=%v search=%v", thinking, search)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekReasoner(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-reasoner")
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-pro")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-reasoner")
|
||||
t.Fatal("expected ok for deepseek-v4-pro")
|
||||
}
|
||||
if !thinking || search {
|
||||
t.Fatalf("expected thinking=true search=false, got thinking=%v search=%v", thinking, search)
|
||||
@@ -30,19 +40,19 @@ func TestGetModelConfigDeepSeekReasoner(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekChatSearch(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-chat-search")
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-flash-search")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-chat-search")
|
||||
t.Fatal("expected ok for deepseek-v4-flash-search")
|
||||
}
|
||||
if thinking || !search {
|
||||
t.Fatalf("expected thinking=false search=true, got thinking=%v search=%v", thinking, search)
|
||||
if !thinking || !search {
|
||||
t.Fatalf("expected thinking=true search=true, got thinking=%v search=%v", thinking, search)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekReasonerSearch(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-reasoner-search")
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-pro-search")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-reasoner-search")
|
||||
t.Fatal("expected ok for deepseek-v4-pro-search")
|
||||
}
|
||||
if !thinking || !search {
|
||||
t.Fatalf("expected both true, got thinking=%v search=%v", thinking, search)
|
||||
@@ -50,57 +60,68 @@ func TestGetModelConfigDeepSeekReasonerSearch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekExpertChat(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-expert-chat")
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-pro")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-expert-chat")
|
||||
t.Fatal("expected ok for deepseek-v4-pro")
|
||||
}
|
||||
if thinking || search {
|
||||
t.Fatalf("expected no thinking/search for deepseek-expert-chat, got thinking=%v search=%v", thinking, search)
|
||||
if !thinking || search {
|
||||
t.Fatalf("expected thinking=true search=false for deepseek-v4-pro, got thinking=%v search=%v", thinking, search)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekExpertReasonerSearch(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-expert-reasoner-search")
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-pro-search")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-expert-reasoner-search")
|
||||
t.Fatal("expected ok for deepseek-v4-pro-search")
|
||||
}
|
||||
if !thinking || !search {
|
||||
t.Fatalf("expected both true, got thinking=%v search=%v", thinking, search)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekVisionReasonerSearch(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-vision-reasoner-search")
|
||||
func TestGetModelConfigDeepSeekVision(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("deepseek-v4-vision")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for deepseek-vision-reasoner-search")
|
||||
t.Fatal("expected ok for deepseek-v4-vision")
|
||||
}
|
||||
if !thinking || !search {
|
||||
t.Fatalf("expected both true, got thinking=%v search=%v", thinking, search)
|
||||
if !thinking || search {
|
||||
t.Fatalf("expected thinking=true search=false, got thinking=%v search=%v", thinking, search)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelConfigDeepSeekVisionSearchUnsupported(t *testing.T) {
|
||||
_, _, ok := GetModelConfig("deepseek-v4-vision-search")
|
||||
if ok {
|
||||
t.Fatal("expected deepseek-v4-vision-search to be unsupported")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelTypeDefaultExpertAndVision(t *testing.T) {
|
||||
defaultType, ok := GetModelType("deepseek-chat")
|
||||
defaultType, ok := GetModelType("deepseek-v4-flash")
|
||||
if !ok || defaultType != "default" {
|
||||
t.Fatalf("expected default model_type, got ok=%v model_type=%q", ok, defaultType)
|
||||
}
|
||||
expertType, ok := GetModelType("deepseek-expert-chat")
|
||||
defaultNoThinkingType, ok := GetModelType("deepseek-v4-flash-nothinking")
|
||||
if !ok || defaultNoThinkingType != "default" {
|
||||
t.Fatalf("expected default model_type for nothinking, got ok=%v model_type=%q", ok, defaultNoThinkingType)
|
||||
}
|
||||
expertType, ok := GetModelType("deepseek-v4-pro")
|
||||
if !ok || expertType != "expert" {
|
||||
t.Fatalf("expected expert model_type, got ok=%v model_type=%q", ok, expertType)
|
||||
}
|
||||
visionType, ok := GetModelType("deepseek-vision-chat")
|
||||
visionType, ok := GetModelType("deepseek-v4-vision")
|
||||
if !ok || visionType != "vision" {
|
||||
t.Fatalf("expected vision model_type, got ok=%v model_type=%q", ok, visionType)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModelConfigCaseInsensitive(t *testing.T) {
|
||||
thinking, search, ok := GetModelConfig("DeepSeek-Chat")
|
||||
thinking, search, ok := GetModelConfig("DeepSeek-V4-Flash")
|
||||
if !ok {
|
||||
t.Fatal("expected ok for case-insensitive deepseek-chat")
|
||||
t.Fatal("expected ok for case-insensitive deepseek-v4-flash")
|
||||
}
|
||||
if thinking || search {
|
||||
t.Fatalf("expected no thinking/search for case-insensitive deepseek-chat")
|
||||
if !thinking || search {
|
||||
t.Fatalf("expected thinking=true search=false for case-insensitive deepseek-v4-flash")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -142,29 +163,16 @@ func TestLowerFunction(t *testing.T) {
|
||||
// ─── Config.MarshalJSON / UnmarshalJSON roundtrip ────────────────────
|
||||
|
||||
func TestConfigJSONRoundtrip(t *testing.T) {
|
||||
trueVal := true
|
||||
falseVal := false
|
||||
cfg := Config{
|
||||
Keys: []string{"key1", "key2"},
|
||||
Accounts: []Account{{Email: "user@example.com", Password: "pass", Token: "tok"}},
|
||||
ClaudeMapping: map[string]string{
|
||||
"fast": "deepseek-chat",
|
||||
"slow": "deepseek-reasoner",
|
||||
},
|
||||
Keys: []string{"key1", "key2"},
|
||||
Accounts: []Account{{Email: "user@example.com", Password: "pass", Token: "tok"}},
|
||||
ModelAliases: map[string]string{"Claude-Sonnet-4-6": "DeepSeek-V4-Flash"},
|
||||
AutoDelete: AutoDeleteConfig{
|
||||
Mode: "single",
|
||||
},
|
||||
HistorySplit: HistorySplitConfig{
|
||||
Enabled: &trueVal,
|
||||
TriggerAfterTurns: func() *int { v := 2; return &v }(),
|
||||
},
|
||||
Runtime: RuntimeConfig{
|
||||
TokenRefreshIntervalHours: 12,
|
||||
},
|
||||
Compat: CompatConfig{
|
||||
WideInputStrictOutput: &trueVal,
|
||||
StripReferenceMarkers: &falseVal,
|
||||
},
|
||||
VercelSyncHash: "hash123",
|
||||
VercelSyncTime: 1234567890,
|
||||
AdditionalFields: map[string]any{
|
||||
@@ -188,8 +196,8 @@ func TestConfigJSONRoundtrip(t *testing.T) {
|
||||
if len(decoded.Accounts) != 1 || decoded.Accounts[0].Email != "user@example.com" {
|
||||
t.Fatalf("unexpected accounts: %#v", decoded.Accounts)
|
||||
}
|
||||
if decoded.ClaudeMapping["fast"] != "deepseek-chat" {
|
||||
t.Fatalf("unexpected claude mapping: %#v", decoded.ClaudeMapping)
|
||||
if decoded.ModelAliases["claude-sonnet-4-6"] != "deepseek-v4-flash" {
|
||||
t.Fatalf("unexpected normalized model aliases: %#v", decoded.ModelAliases)
|
||||
}
|
||||
if decoded.Runtime.TokenRefreshIntervalHours != 12 {
|
||||
t.Fatalf("unexpected runtime refresh interval: %#v", decoded.Runtime.TokenRefreshIntervalHours)
|
||||
@@ -197,18 +205,6 @@ func TestConfigJSONRoundtrip(t *testing.T) {
|
||||
if decoded.AutoDelete.Mode != "single" {
|
||||
t.Fatalf("unexpected auto delete mode: %#v", decoded.AutoDelete.Mode)
|
||||
}
|
||||
if decoded.HistorySplit.Enabled == nil || !*decoded.HistorySplit.Enabled {
|
||||
t.Fatalf("unexpected history split enabled: %#v", decoded.HistorySplit.Enabled)
|
||||
}
|
||||
if decoded.HistorySplit.TriggerAfterTurns == nil || *decoded.HistorySplit.TriggerAfterTurns != 2 {
|
||||
t.Fatalf("unexpected history split trigger_after_turns: %#v", decoded.HistorySplit.TriggerAfterTurns)
|
||||
}
|
||||
if decoded.Compat.WideInputStrictOutput == nil || !*decoded.Compat.WideInputStrictOutput {
|
||||
t.Fatalf("unexpected compat wide_input_strict_output: %#v", decoded.Compat.WideInputStrictOutput)
|
||||
}
|
||||
if decoded.Compat.StripReferenceMarkers == nil || *decoded.Compat.StripReferenceMarkers {
|
||||
t.Fatalf("unexpected compat strip_reference_markers: %#v", decoded.Compat.StripReferenceMarkers)
|
||||
}
|
||||
if decoded.VercelSyncHash != "hash123" {
|
||||
t.Fatalf("unexpected vercel sync hash: %q", decoded.VercelSyncHash)
|
||||
}
|
||||
@@ -255,25 +251,48 @@ func TestConfigUnmarshalJSONPreservesUnknownFields(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigUnmarshalJSONIgnoresRemovedLegacyModelMappings(t *testing.T) {
|
||||
raw := `{"keys":["k1"],"accounts":[],"claude_mapping":{"fast":"deepseek-v4-pro"},"claude_model_mapping":{"slow":"deepseek-v4-pro"}}`
|
||||
var cfg Config
|
||||
if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
|
||||
t.Fatalf("unmarshal error: %v", err)
|
||||
}
|
||||
if len(cfg.ModelAliases) != 0 {
|
||||
t.Fatalf("expected removed legacy mappings to be ignored, got %#v", cfg.ModelAliases)
|
||||
}
|
||||
if _, ok := cfg.AdditionalFields["claude_mapping"]; ok {
|
||||
t.Fatalf("expected removed legacy field not to persist in additional fields: %#v", cfg.AdditionalFields)
|
||||
}
|
||||
if _, ok := cfg.AdditionalFields["claude_model_mapping"]; ok {
|
||||
t.Fatalf("expected removed legacy field not to persist in additional fields: %#v", cfg.AdditionalFields)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigUnmarshalJSONIgnoresRemovedHistorySplit(t *testing.T) {
|
||||
raw := `{"keys":["k1"],"history_split":{"enabled":true,"trigger_after_turns":2}}`
|
||||
var cfg Config
|
||||
if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
|
||||
t.Fatalf("unmarshal error: %v", err)
|
||||
}
|
||||
if _, ok := cfg.AdditionalFields["history_split"]; ok {
|
||||
t.Fatalf("expected removed legacy field not to persist in additional fields: %#v", cfg.AdditionalFields)
|
||||
}
|
||||
out, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("marshal error: %v", err)
|
||||
}
|
||||
if strings.Contains(string(out), "history_split") {
|
||||
t.Fatalf("expected removed history_split field not to marshal, got %s", out)
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Config.Clone ────────────────────────────────────────────────────
|
||||
|
||||
func TestConfigCloneIsDeepCopy(t *testing.T) {
|
||||
falseVal := false
|
||||
trueVal := true
|
||||
turns := 2
|
||||
cfg := Config{
|
||||
Keys: []string{"key1"},
|
||||
Accounts: []Account{{Email: "user@test.com", Token: "token"}},
|
||||
ClaudeMapping: map[string]string{
|
||||
"fast": "deepseek-chat",
|
||||
},
|
||||
Compat: CompatConfig{
|
||||
StripReferenceMarkers: &falseVal,
|
||||
},
|
||||
HistorySplit: HistorySplitConfig{
|
||||
Enabled: &trueVal,
|
||||
TriggerAfterTurns: &turns,
|
||||
},
|
||||
Keys: []string{"key1"},
|
||||
Accounts: []Account{{Email: "user@test.com", Token: "token"}},
|
||||
ModelAliases: map[string]string{"claude-sonnet-4-6": "deepseek-v4-flash"},
|
||||
AdditionalFields: map[string]any{"custom": "value"},
|
||||
}
|
||||
|
||||
@@ -282,16 +301,7 @@ func TestConfigCloneIsDeepCopy(t *testing.T) {
|
||||
// Modify original
|
||||
cfg.Keys[0] = "modified"
|
||||
cfg.Accounts[0].Email = "modified@test.com"
|
||||
cfg.ClaudeMapping["fast"] = "modified-model"
|
||||
if cfg.Compat.StripReferenceMarkers != nil {
|
||||
*cfg.Compat.StripReferenceMarkers = true
|
||||
}
|
||||
if cfg.HistorySplit.Enabled != nil {
|
||||
*cfg.HistorySplit.Enabled = false
|
||||
}
|
||||
if cfg.HistorySplit.TriggerAfterTurns != nil {
|
||||
*cfg.HistorySplit.TriggerAfterTurns = 5
|
||||
}
|
||||
cfg.ModelAliases["claude-sonnet-4-6"] = "modified-model"
|
||||
|
||||
// Cloned should not be affected
|
||||
if cloned.Keys[0] != "key1" {
|
||||
@@ -300,17 +310,8 @@ func TestConfigCloneIsDeepCopy(t *testing.T) {
|
||||
if cloned.Accounts[0].Email != "user@test.com" {
|
||||
t.Fatalf("clone accounts was affected: %#v", cloned.Accounts)
|
||||
}
|
||||
if cloned.ClaudeMapping["fast"] != "deepseek-chat" {
|
||||
t.Fatalf("clone claude mapping was affected: %#v", cloned.ClaudeMapping)
|
||||
}
|
||||
if cloned.Compat.StripReferenceMarkers == nil || *cloned.Compat.StripReferenceMarkers {
|
||||
t.Fatalf("clone compat was affected: %#v", cloned.Compat.StripReferenceMarkers)
|
||||
}
|
||||
if cloned.HistorySplit.Enabled == nil || !*cloned.HistorySplit.Enabled {
|
||||
t.Fatalf("clone history split enabled was affected: %#v", cloned.HistorySplit.Enabled)
|
||||
}
|
||||
if cloned.HistorySplit.TriggerAfterTurns == nil || *cloned.HistorySplit.TriggerAfterTurns != 2 {
|
||||
t.Fatalf("clone history split trigger was affected: %#v", cloned.HistorySplit.TriggerAfterTurns)
|
||||
if cloned.ModelAliases["claude-sonnet-4-6"] != "deepseek-v4-flash" {
|
||||
t.Fatalf("clone model aliases was affected: %#v", cloned.ModelAliases)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -450,53 +451,9 @@ func TestStoreFindAccountNotFound(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreCompatWideInputStrictOutputDefaultTrue(t *testing.T) {
|
||||
t.Setenv("DS2API_CONFIG_JSON", `{"keys":["k1"],"accounts":[]}`)
|
||||
store := LoadStore()
|
||||
if !store.CompatWideInputStrictOutput() {
|
||||
t.Fatal("expected default wide_input_strict_output=true when unset")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreCompatWideInputStrictOutputCanDisable(t *testing.T) {
|
||||
t.Setenv("DS2API_CONFIG_JSON", `{"keys":["k1"],"accounts":[],"compat":{"wide_input_strict_output":false}}`)
|
||||
store := LoadStore()
|
||||
if store.CompatWideInputStrictOutput() {
|
||||
t.Fatal("expected wide_input_strict_output=false when explicitly configured")
|
||||
}
|
||||
|
||||
snap := store.Snapshot()
|
||||
data, err := snap.MarshalJSON()
|
||||
if err != nil {
|
||||
t.Fatalf("marshal failed: %v", err)
|
||||
}
|
||||
var out map[string]any
|
||||
if err := json.Unmarshal(data, &out); err != nil {
|
||||
t.Fatalf("decode failed: %v", err)
|
||||
}
|
||||
rawCompat, ok := out["compat"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected compat in marshaled output, got %#v", out)
|
||||
}
|
||||
if rawCompat["wide_input_strict_output"] != false {
|
||||
t.Fatalf("expected explicit false in compat, got %#v", rawCompat)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreCompatStripReferenceMarkersDefaultTrue(t *testing.T) {
|
||||
t.Setenv("DS2API_CONFIG_JSON", `{"keys":["k1"],"accounts":[]}`)
|
||||
store := LoadStore()
|
||||
if !store.CompatStripReferenceMarkers() {
|
||||
t.Fatal("expected default strip_reference_markers=true when unset")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreCompatStripReferenceMarkersCanDisable(t *testing.T) {
|
||||
func TestStoreIgnoresRemovedCompatConfig(t *testing.T) {
|
||||
t.Setenv("DS2API_CONFIG_JSON", `{"keys":["k1"],"accounts":[],"compat":{"strip_reference_markers":false}}`)
|
||||
store := LoadStore()
|
||||
if store.CompatStripReferenceMarkers() {
|
||||
t.Fatal("expected strip_reference_markers=false when explicitly configured")
|
||||
}
|
||||
|
||||
snap := store.Snapshot()
|
||||
data, err := snap.MarshalJSON()
|
||||
@@ -507,12 +464,8 @@ func TestStoreCompatStripReferenceMarkersCanDisable(t *testing.T) {
|
||||
if err := json.Unmarshal(data, &out); err != nil {
|
||||
t.Fatalf("decode failed: %v", err)
|
||||
}
|
||||
rawCompat, ok := out["compat"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("expected compat in marshaled output, got %#v", out)
|
||||
}
|
||||
if rawCompat["strip_reference_markers"] != false {
|
||||
t.Fatalf("expected explicit false in compat, got %#v", rawCompat)
|
||||
if _, ok := out["compat"]; ok {
|
||||
t.Fatalf("expected removed compat field not to marshal, got %#v", out)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -652,25 +605,27 @@ func TestNormalizeCredentialsPrefersStructuredAPIKeys(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreClaudeMapping(t *testing.T) {
|
||||
t.Setenv("DS2API_CONFIG_JSON", `{"keys":[],"accounts":[],"claude_mapping":{"fast":"deepseek-chat","slow":"deepseek-reasoner"}}`)
|
||||
func TestStoreModelAliasesIncludesDefaultsAndOverrides(t *testing.T) {
|
||||
t.Setenv("DS2API_CONFIG_JSON", `{"keys":[],"accounts":[],"model_aliases":{"claude-opus-4-6":"deepseek-v4-pro-search"}}`)
|
||||
store := LoadStore()
|
||||
mapping := store.ClaudeMapping()
|
||||
if mapping["fast"] != "deepseek-chat" {
|
||||
t.Fatalf("unexpected fast mapping: %q", mapping["fast"])
|
||||
aliases := store.ModelAliases()
|
||||
if aliases["claude-sonnet-4-6"] != "deepseek-v4-flash" {
|
||||
t.Fatalf("expected default alias to remain available, got %q", aliases["claude-sonnet-4-6"])
|
||||
}
|
||||
if mapping["slow"] != "deepseek-reasoner" {
|
||||
t.Fatalf("unexpected slow mapping: %q", mapping["slow"])
|
||||
if aliases["claude-opus-4-6"] != "deepseek-v4-pro-search" {
|
||||
t.Fatalf("expected custom alias override, got %q", aliases["claude-opus-4-6"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreClaudeMappingEmpty(t *testing.T) {
|
||||
func TestStoreModelAliasesDefault(t *testing.T) {
|
||||
t.Setenv("DS2API_CONFIG_JSON", `{"keys":[],"accounts":[]}`)
|
||||
store := LoadStore()
|
||||
mapping := store.ClaudeMapping()
|
||||
// Even without config mapping, there are defaults
|
||||
if mapping == nil {
|
||||
t.Fatal("expected non-nil mapping (may contain defaults)")
|
||||
aliases := store.ModelAliases()
|
||||
if aliases == nil {
|
||||
t.Fatal("expected non-nil aliases")
|
||||
}
|
||||
if aliases["claude-sonnet-4-6"] != "deepseek-v4-flash" {
|
||||
t.Fatalf("expected built-in alias, got %q", aliases["claude-sonnet-4-6"])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -720,18 +675,16 @@ func TestOpenAIModelsResponse(t *testing.T) {
|
||||
t.Fatal("expected non-empty models list")
|
||||
}
|
||||
expected := map[string]bool{
|
||||
"deepseek-chat": false,
|
||||
"deepseek-reasoner": false,
|
||||
"deepseek-chat-search": false,
|
||||
"deepseek-reasoner-search": false,
|
||||
"deepseek-expert-chat": false,
|
||||
"deepseek-expert-reasoner": false,
|
||||
"deepseek-expert-chat-search": false,
|
||||
"deepseek-expert-reasoner-search": false,
|
||||
"deepseek-vision-chat": false,
|
||||
"deepseek-vision-reasoner": false,
|
||||
"deepseek-vision-chat-search": false,
|
||||
"deepseek-vision-reasoner-search": false,
|
||||
"deepseek-v4-flash": false,
|
||||
"deepseek-v4-flash-nothinking": false,
|
||||
"deepseek-v4-pro": false,
|
||||
"deepseek-v4-pro-nothinking": false,
|
||||
"deepseek-v4-flash-search": false,
|
||||
"deepseek-v4-flash-search-nothinking": false,
|
||||
"deepseek-v4-pro-search": false,
|
||||
"deepseek-v4-pro-search-nothinking": false,
|
||||
"deepseek-v4-vision": false,
|
||||
"deepseek-v4-vision-nothinking": false,
|
||||
}
|
||||
for _, model := range data {
|
||||
if _, ok := expected[model.ID]; ok {
|
||||
|
||||
@@ -144,6 +144,44 @@ func TestLoadStoreIgnoresLegacyConfigJSONEnv(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestExplicitMissingConfigPathBootstrapsEmptyFileBackedStore(t *testing.T) {
|
||||
path := t.TempDir() + "/config.json"
|
||||
|
||||
t.Setenv("DS2API_CONFIG_JSON", "")
|
||||
t.Setenv("DS2API_CONFIG_PATH", path)
|
||||
|
||||
store, err := LoadStoreWithError()
|
||||
if err != nil {
|
||||
t.Fatalf("expected missing explicit config path to bootstrap, got: %v", err)
|
||||
}
|
||||
if store.IsEnvBacked() {
|
||||
t.Fatal("expected bootstrap store to be file-backed")
|
||||
}
|
||||
if store.ConfigPath() != path {
|
||||
t.Fatalf("ConfigPath() = %q, want %q", store.ConfigPath(), path)
|
||||
}
|
||||
if len(store.Keys()) != 0 || len(store.Accounts()) != 0 {
|
||||
t.Fatalf("expected empty bootstrap config, got keys=%d accounts=%d", len(store.Keys()), len(store.Accounts()))
|
||||
}
|
||||
if _, statErr := os.Stat(path); !errors.Is(statErr, os.ErrNotExist) {
|
||||
t.Fatalf("expected bootstrap not to create config until first save, stat err=%v", statErr)
|
||||
}
|
||||
|
||||
if err := store.Update(func(c *Config) error {
|
||||
c.Keys = []string{"first-key"}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("update should persist bootstrap config: %v", err)
|
||||
}
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("expected first update to write config: %v", err)
|
||||
}
|
||||
if !strings.Contains(string(content), "first-key") {
|
||||
t.Fatalf("expected saved config to contain first key, got: %s", content)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnvBackedStoreWritebackBootstrapsMissingConfigFile(t *testing.T) {
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "config-*.json")
|
||||
if err != nil {
|
||||
|
||||
@@ -7,26 +7,88 @@ type mockModelAliasReader map[string]string
|
||||
func (m mockModelAliasReader) ModelAliases() map[string]string { return m }
|
||||
|
||||
func TestResolveModelDirectDeepSeek(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "deepseek-chat")
|
||||
if !ok || got != "deepseek-chat" {
|
||||
t.Fatalf("expected deepseek-chat, got ok=%v model=%q", ok, got)
|
||||
got, ok := ResolveModel(nil, "deepseek-v4-flash")
|
||||
if !ok || got != "deepseek-v4-flash" {
|
||||
t.Fatalf("expected deepseek-v4-flash, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelDirectDeepSeekNoThinking(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "deepseek-v4-flash-nothinking")
|
||||
if !ok || got != "deepseek-v4-flash-nothinking" {
|
||||
t.Fatalf("expected deepseek-v4-flash-nothinking, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelAlias(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "gpt-4.1")
|
||||
if !ok || got != "deepseek-chat" {
|
||||
t.Fatalf("expected alias gpt-4.1 -> deepseek-chat, got ok=%v model=%q", ok, got)
|
||||
if !ok || got != "deepseek-v4-flash" {
|
||||
t.Fatalf("expected alias gpt-4.1 -> deepseek-v4-flash, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveLatestOpenAIAlias(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "gpt-5.5")
|
||||
if !ok || got != "deepseek-v4-flash" {
|
||||
t.Fatalf("expected alias gpt-5.5 -> deepseek-v4-flash, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveLatestClaudeAlias(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "claude-sonnet-4-6")
|
||||
if !ok || got != "deepseek-v4-flash" {
|
||||
t.Fatalf("expected alias claude-sonnet-4-6 -> deepseek-v4-flash, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveLatestClaudeAliasNoThinking(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "claude-sonnet-4-6-nothinking")
|
||||
if !ok || got != "deepseek-v4-flash-nothinking" {
|
||||
t.Fatalf("expected alias claude-sonnet-4-6-nothinking -> deepseek-v4-flash-nothinking, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveExpandedHistoricalAliases(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
model string
|
||||
want string
|
||||
}{
|
||||
{name: "openai old chatgpt", model: "chatgpt-4o", want: "deepseek-v4-flash"},
|
||||
{name: "openai codex max", model: "gpt-5.1-codex-max", want: "deepseek-v4-pro"},
|
||||
{name: "openai deep research", model: "o3-deep-research", want: "deepseek-v4-pro-search"},
|
||||
{name: "openai historical reasoning", model: "o1-preview", want: "deepseek-v4-pro"},
|
||||
{name: "claude latest historical", model: "claude-3-5-sonnet-latest", want: "deepseek-v4-flash"},
|
||||
{name: "claude historical opus", model: "claude-3-opus-20240229", want: "deepseek-v4-pro"},
|
||||
{name: "claude historical haiku", model: "claude-3-haiku-20240307", want: "deepseek-v4-flash"},
|
||||
{name: "gemini latest alias", model: "gemini-flash-latest", want: "deepseek-v4-flash"},
|
||||
{name: "gemini historical pro", model: "gemini-1.5-pro", want: "deepseek-v4-pro"},
|
||||
{name: "gemini vision legacy", model: "gemini-pro-vision", want: "deepseek-v4-vision"},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, tc.model)
|
||||
if !ok || got != tc.want {
|
||||
t.Fatalf("expected alias %s -> %s, got ok=%v model=%q", tc.model, tc.want, ok, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelHeuristicReasoner(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "o3-super")
|
||||
if !ok || got != "deepseek-reasoner" {
|
||||
if !ok || got != "deepseek-v4-pro" {
|
||||
t.Fatalf("expected heuristic reasoner, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelHeuristicReasonerNoThinking(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "o3-super-nothinking")
|
||||
if !ok || got != "deepseek-v4-pro-nothinking" {
|
||||
t.Fatalf("expected heuristic reasoner nothinking, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelUnknown(t *testing.T) {
|
||||
_, ok := ResolveModel(nil, "totally-custom-model")
|
||||
if ok {
|
||||
@@ -34,28 +96,65 @@ func TestResolveModelUnknown(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelRejectsLegacyDeepSeekIDs(t *testing.T) {
|
||||
legacyModels := []string{
|
||||
"deepseek-chat",
|
||||
"deepseek-reasoner",
|
||||
"deepseek-chat-search",
|
||||
"deepseek-reasoner-search",
|
||||
"deepseek-expert-chat",
|
||||
"deepseek-expert-reasoner",
|
||||
"deepseek-vision-chat",
|
||||
}
|
||||
for _, model := range legacyModels {
|
||||
if got, ok := ResolveModel(nil, model); ok {
|
||||
t.Fatalf("expected legacy model %q to be rejected, got %q", model, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelRejectsRetiredHistoricalModels(t *testing.T) {
|
||||
retiredModels := []string{
|
||||
"claude-2.1",
|
||||
"claude-instant-1.2",
|
||||
"gpt-3.5-turbo",
|
||||
}
|
||||
for _, model := range retiredModels {
|
||||
if got, ok := ResolveModel(nil, model); ok {
|
||||
t.Fatalf("expected retired model %q to be rejected, got %q", model, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelDirectDeepSeekExpert(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "deepseek-expert-chat")
|
||||
if !ok || got != "deepseek-expert-chat" {
|
||||
t.Fatalf("expected deepseek-expert-chat, got ok=%v model=%q", ok, got)
|
||||
got, ok := ResolveModel(nil, "deepseek-v4-pro")
|
||||
if !ok || got != "deepseek-v4-pro" {
|
||||
t.Fatalf("expected deepseek-v4-pro, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelCustomAliasToExpert(t *testing.T) {
|
||||
got, ok := ResolveModel(mockModelAliasReader{
|
||||
"my-expert-model": "deepseek-expert-reasoner-search",
|
||||
"my-expert-model": "deepseek-v4-pro-search",
|
||||
}, "my-expert-model")
|
||||
if !ok || got != "deepseek-expert-reasoner-search" {
|
||||
t.Fatalf("expected alias -> deepseek-expert-reasoner-search, got ok=%v model=%q", ok, got)
|
||||
if !ok || got != "deepseek-v4-pro-search" {
|
||||
t.Fatalf("expected alias -> deepseek-v4-pro-search, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelCustomAliasToVision(t *testing.T) {
|
||||
got, ok := ResolveModel(mockModelAliasReader{
|
||||
"my-vision-model": "deepseek-vision-chat-search",
|
||||
"my-vision-model": "deepseek-v4-vision",
|
||||
}, "my-vision-model")
|
||||
if !ok || got != "deepseek-vision-chat-search" {
|
||||
t.Fatalf("expected alias -> deepseek-vision-chat-search, got ok=%v model=%q", ok, got)
|
||||
if !ok || got != "deepseek-v4-vision" {
|
||||
t.Fatalf("expected alias -> deepseek-v4-vision, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveModelHeuristicVisionIgnoresSearchSuffix(t *testing.T) {
|
||||
got, ok := ResolveModel(nil, "gemini-vision-search")
|
||||
if !ok || got != "deepseek-v4-vision" {
|
||||
t.Fatalf("expected heuristic vision alias to resolve without search variant, got ok=%v model=%q", ok, got)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -14,29 +14,26 @@ type ModelAliasReader interface {
|
||||
ModelAliases() map[string]string
|
||||
}
|
||||
|
||||
var DeepSeekModels = []ModelInfo{
|
||||
{ID: "deepseek-chat", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-reasoner", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-chat-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-reasoner-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-expert-chat", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-expert-reasoner", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-expert-chat-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-expert-reasoner-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-vision-chat", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-vision-reasoner", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-vision-chat-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-vision-reasoner-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
const noThinkingModelSuffix = "-nothinking"
|
||||
|
||||
var deepSeekBaseModels = []ModelInfo{
|
||||
{ID: "deepseek-v4-flash", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-v4-pro", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-v4-flash-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-v4-pro-search", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
{ID: "deepseek-v4-vision", Object: "model", Created: 1677610602, OwnedBy: "deepseek", Permission: []any{}},
|
||||
}
|
||||
|
||||
var ClaudeModels = []ModelInfo{
|
||||
var DeepSeekModels = appendNoThinkingVariants(deepSeekBaseModels)
|
||||
|
||||
var claudeBaseModels = []ModelInfo{
|
||||
// Current aliases
|
||||
{ID: "claude-opus-4-6", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-sonnet-4-5", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-sonnet-4-6", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-haiku-4-5", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
|
||||
// Current snapshots
|
||||
{ID: "claude-opus-4-5-20251101", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
// Claude 4.x snapshots and prior aliases kept for compatibility
|
||||
{ID: "claude-sonnet-4-5", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-opus-4-1", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-opus-4-1-20250805", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-opus-4-0", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
@@ -57,57 +54,33 @@ var ClaudeModels = []ModelInfo{
|
||||
{ID: "claude-3-5-haiku-latest", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-3-5-haiku-20241022", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-3-haiku-20240307", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
|
||||
// Claude 2.x and 1.x (retired but accepted for compatibility)
|
||||
{ID: "claude-2.1", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-2.0", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-1.3", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-1.2", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-1.1", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-1.0", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-instant-1.2", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-instant-1.1", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
{ID: "claude-instant-1.0", Object: "model", Created: 1715635200, OwnedBy: "anthropic"},
|
||||
}
|
||||
|
||||
var ClaudeModels = appendNoThinkingVariants(claudeBaseModels)
|
||||
|
||||
func GetModelConfig(model string) (thinking bool, search bool, ok bool) {
|
||||
switch lower(model) {
|
||||
case "deepseek-chat":
|
||||
return false, false, true
|
||||
case "deepseek-reasoner":
|
||||
return true, false, true
|
||||
case "deepseek-chat-search":
|
||||
return false, true, true
|
||||
case "deepseek-reasoner-search":
|
||||
return true, true, true
|
||||
case "deepseek-expert-chat":
|
||||
return false, false, true
|
||||
case "deepseek-expert-reasoner":
|
||||
return true, false, true
|
||||
case "deepseek-expert-chat-search":
|
||||
return false, true, true
|
||||
case "deepseek-expert-reasoner-search":
|
||||
return true, true, true
|
||||
case "deepseek-vision-chat":
|
||||
return false, false, true
|
||||
case "deepseek-vision-reasoner":
|
||||
return true, false, true
|
||||
case "deepseek-vision-chat-search":
|
||||
return false, true, true
|
||||
case "deepseek-vision-reasoner-search":
|
||||
return true, true, true
|
||||
baseModel, noThinking := splitNoThinkingModel(model)
|
||||
if baseModel == "" {
|
||||
return false, false, false
|
||||
}
|
||||
switch baseModel {
|
||||
case "deepseek-v4-flash", "deepseek-v4-pro", "deepseek-v4-vision":
|
||||
return !noThinking, false, true
|
||||
case "deepseek-v4-flash-search", "deepseek-v4-pro-search":
|
||||
return !noThinking, true, true
|
||||
default:
|
||||
return false, false, false
|
||||
}
|
||||
}
|
||||
|
||||
func GetModelType(model string) (modelType string, ok bool) {
|
||||
switch lower(model) {
|
||||
case "deepseek-chat", "deepseek-reasoner", "deepseek-chat-search", "deepseek-reasoner-search":
|
||||
baseModel, _ := splitNoThinkingModel(model)
|
||||
switch baseModel {
|
||||
case "deepseek-v4-flash", "deepseek-v4-flash-search":
|
||||
return "default", true
|
||||
case "deepseek-expert-chat", "deepseek-expert-reasoner", "deepseek-expert-chat-search", "deepseek-expert-reasoner-search":
|
||||
case "deepseek-v4-pro", "deepseek-v4-pro-search":
|
||||
return "expert", true
|
||||
case "deepseek-vision-chat", "deepseek-vision-reasoner", "deepseek-vision-chat-search", "deepseek-vision-reasoner-search":
|
||||
case "deepseek-v4-vision":
|
||||
return "vision", true
|
||||
default:
|
||||
return "", false
|
||||
@@ -119,29 +92,112 @@ func IsSupportedDeepSeekModel(model string) bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
func IsNoThinkingModel(model string) bool {
|
||||
_, noThinking := splitNoThinkingModel(model)
|
||||
return noThinking
|
||||
}
|
||||
|
||||
func DefaultModelAliases() map[string]string {
|
||||
return map[string]string{
|
||||
"gpt-4o": "deepseek-chat",
|
||||
"gpt-4.1": "deepseek-chat",
|
||||
"gpt-4.1-mini": "deepseek-chat",
|
||||
"gpt-4.1-nano": "deepseek-chat",
|
||||
"gpt-5": "deepseek-chat",
|
||||
"gpt-5-mini": "deepseek-chat",
|
||||
"gpt-5-codex": "deepseek-reasoner",
|
||||
"o1": "deepseek-reasoner",
|
||||
"o1-mini": "deepseek-reasoner",
|
||||
"o3": "deepseek-reasoner",
|
||||
"o3-mini": "deepseek-reasoner",
|
||||
"claude-sonnet-4-5": "deepseek-chat",
|
||||
"claude-haiku-4-5": "deepseek-chat",
|
||||
"claude-opus-4-6": "deepseek-reasoner",
|
||||
"claude-3-5-sonnet": "deepseek-chat",
|
||||
"claude-3-5-haiku": "deepseek-chat",
|
||||
"claude-3-opus": "deepseek-reasoner",
|
||||
"gemini-2.5-pro": "deepseek-chat",
|
||||
"gemini-2.5-flash": "deepseek-chat",
|
||||
"llama-3.1-70b-instruct": "deepseek-chat",
|
||||
"qwen-max": "deepseek-chat",
|
||||
// OpenAI GPT / ChatGPT families
|
||||
"chatgpt-4o": "deepseek-v4-flash",
|
||||
"gpt-4": "deepseek-v4-flash",
|
||||
"gpt-4-turbo": "deepseek-v4-flash",
|
||||
"gpt-4-turbo-preview": "deepseek-v4-flash",
|
||||
"gpt-4.5-preview": "deepseek-v4-flash",
|
||||
"gpt-4o": "deepseek-v4-flash",
|
||||
"gpt-4o-mini": "deepseek-v4-flash",
|
||||
"gpt-4.1": "deepseek-v4-flash",
|
||||
"gpt-4.1-mini": "deepseek-v4-flash",
|
||||
"gpt-4.1-nano": "deepseek-v4-flash",
|
||||
"gpt-5": "deepseek-v4-flash",
|
||||
"gpt-5-chat": "deepseek-v4-flash",
|
||||
"gpt-5.1": "deepseek-v4-flash",
|
||||
"gpt-5.1-chat": "deepseek-v4-flash",
|
||||
"gpt-5.2": "deepseek-v4-flash",
|
||||
"gpt-5.2-chat": "deepseek-v4-flash",
|
||||
"gpt-5.3-chat": "deepseek-v4-flash",
|
||||
"gpt-5.4": "deepseek-v4-flash",
|
||||
"gpt-5.5": "deepseek-v4-flash",
|
||||
"gpt-5-mini": "deepseek-v4-flash",
|
||||
"gpt-5-nano": "deepseek-v4-flash",
|
||||
"gpt-5.4-mini": "deepseek-v4-flash",
|
||||
"gpt-5.4-nano": "deepseek-v4-flash",
|
||||
"gpt-5-pro": "deepseek-v4-pro",
|
||||
"gpt-5.2-pro": "deepseek-v4-pro",
|
||||
"gpt-5.4-pro": "deepseek-v4-pro",
|
||||
"gpt-5.5-pro": "deepseek-v4-pro",
|
||||
"gpt-5-codex": "deepseek-v4-pro",
|
||||
"gpt-5.1-codex": "deepseek-v4-pro",
|
||||
"gpt-5.1-codex-mini": "deepseek-v4-pro",
|
||||
"gpt-5.1-codex-max": "deepseek-v4-pro",
|
||||
"gpt-5.2-codex": "deepseek-v4-pro",
|
||||
"gpt-5.3-codex": "deepseek-v4-pro",
|
||||
"codex-mini-latest": "deepseek-v4-pro",
|
||||
|
||||
// OpenAI reasoning / research families
|
||||
"o1": "deepseek-v4-pro",
|
||||
"o1-preview": "deepseek-v4-pro",
|
||||
"o1-mini": "deepseek-v4-pro",
|
||||
"o1-pro": "deepseek-v4-pro",
|
||||
"o3": "deepseek-v4-pro",
|
||||
"o3-mini": "deepseek-v4-pro",
|
||||
"o3-pro": "deepseek-v4-pro",
|
||||
"o3-deep-research": "deepseek-v4-pro-search",
|
||||
"o4-mini": "deepseek-v4-pro",
|
||||
"o4-mini-deep-research": "deepseek-v4-pro-search",
|
||||
|
||||
// Claude current and historical aliases
|
||||
"claude-opus-4-6": "deepseek-v4-pro",
|
||||
"claude-opus-4-1": "deepseek-v4-pro",
|
||||
"claude-opus-4-1-20250805": "deepseek-v4-pro",
|
||||
"claude-opus-4-0": "deepseek-v4-pro",
|
||||
"claude-opus-4-20250514": "deepseek-v4-pro",
|
||||
"claude-sonnet-4-6": "deepseek-v4-flash",
|
||||
"claude-sonnet-4-5": "deepseek-v4-flash",
|
||||
"claude-sonnet-4-5-20250929": "deepseek-v4-flash",
|
||||
"claude-sonnet-4-0": "deepseek-v4-flash",
|
||||
"claude-sonnet-4-20250514": "deepseek-v4-flash",
|
||||
"claude-haiku-4-5": "deepseek-v4-flash",
|
||||
"claude-haiku-4-5-20251001": "deepseek-v4-flash",
|
||||
"claude-3-7-sonnet": "deepseek-v4-flash",
|
||||
"claude-3-7-sonnet-latest": "deepseek-v4-flash",
|
||||
"claude-3-7-sonnet-20250219": "deepseek-v4-flash",
|
||||
"claude-3-5-sonnet": "deepseek-v4-flash",
|
||||
"claude-3-5-sonnet-latest": "deepseek-v4-flash",
|
||||
"claude-3-5-sonnet-20240620": "deepseek-v4-flash",
|
||||
"claude-3-5-sonnet-20241022": "deepseek-v4-flash",
|
||||
"claude-3-5-haiku": "deepseek-v4-flash",
|
||||
"claude-3-5-haiku-latest": "deepseek-v4-flash",
|
||||
"claude-3-5-haiku-20241022": "deepseek-v4-flash",
|
||||
"claude-3-opus": "deepseek-v4-pro",
|
||||
"claude-3-opus-20240229": "deepseek-v4-pro",
|
||||
"claude-3-sonnet": "deepseek-v4-flash",
|
||||
"claude-3-sonnet-20240229": "deepseek-v4-flash",
|
||||
"claude-3-haiku": "deepseek-v4-flash",
|
||||
"claude-3-haiku-20240307": "deepseek-v4-flash",
|
||||
|
||||
// Gemini current and historical text / multimodal models
|
||||
"gemini-pro": "deepseek-v4-pro",
|
||||
"gemini-pro-vision": "deepseek-v4-vision",
|
||||
"gemini-pro-latest": "deepseek-v4-pro",
|
||||
"gemini-flash-latest": "deepseek-v4-flash",
|
||||
"gemini-1.5-pro": "deepseek-v4-pro",
|
||||
"gemini-1.5-flash": "deepseek-v4-flash",
|
||||
"gemini-1.5-flash-8b": "deepseek-v4-flash",
|
||||
"gemini-2.0-flash": "deepseek-v4-flash",
|
||||
"gemini-2.0-flash-lite": "deepseek-v4-flash",
|
||||
"gemini-2.5-pro": "deepseek-v4-pro",
|
||||
"gemini-2.5-flash": "deepseek-v4-flash",
|
||||
"gemini-2.5-flash-lite": "deepseek-v4-flash",
|
||||
"gemini-3.1-pro": "deepseek-v4-pro",
|
||||
"gemini-3-pro": "deepseek-v4-pro",
|
||||
"gemini-3-flash": "deepseek-v4-flash",
|
||||
"gemini-3.1-flash": "deepseek-v4-flash",
|
||||
"gemini-3.1-flash-lite": "deepseek-v4-flash",
|
||||
|
||||
"llama-3.1-70b-instruct": "deepseek-v4-flash",
|
||||
"qwen-max": "deepseek-v4-flash",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -150,52 +206,33 @@ func ResolveModel(store ModelAliasReader, requested string) (string, bool) {
|
||||
if model == "" {
|
||||
return "", false
|
||||
}
|
||||
aliases := loadModelAliases(store)
|
||||
if IsSupportedDeepSeekModel(model) {
|
||||
return model, true
|
||||
}
|
||||
aliases := DefaultModelAliases()
|
||||
if store != nil {
|
||||
for k, v := range store.ModelAliases() {
|
||||
aliases[lower(strings.TrimSpace(k))] = lower(strings.TrimSpace(v))
|
||||
}
|
||||
}
|
||||
if mapped, ok := aliases[model]; ok && IsSupportedDeepSeekModel(mapped) {
|
||||
return mapped, true
|
||||
}
|
||||
if strings.HasPrefix(model, "deepseek-") {
|
||||
baseModel, noThinking := splitNoThinkingModel(model)
|
||||
resolvedModel, ok := resolveCanonicalModel(aliases, baseModel)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
return withNoThinkingVariant(resolvedModel, noThinking), true
|
||||
}
|
||||
|
||||
knownFamily := false
|
||||
for _, prefix := range []string{
|
||||
"gpt-", "o1", "o3", "claude-", "gemini-", "llama-", "qwen-", "mistral-", "command-",
|
||||
} {
|
||||
if strings.HasPrefix(model, prefix) {
|
||||
knownFamily = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !knownFamily {
|
||||
return "", false
|
||||
}
|
||||
|
||||
useReasoner := strings.Contains(model, "reason") ||
|
||||
strings.Contains(model, "reasoner") ||
|
||||
strings.HasPrefix(model, "o1") ||
|
||||
strings.HasPrefix(model, "o3") ||
|
||||
strings.Contains(model, "opus") ||
|
||||
strings.Contains(model, "r1")
|
||||
useSearch := strings.Contains(model, "search")
|
||||
|
||||
func isRetiredHistoricalModel(model string) bool {
|
||||
switch {
|
||||
case useReasoner && useSearch:
|
||||
return "deepseek-reasoner-search", true
|
||||
case useReasoner:
|
||||
return "deepseek-reasoner", true
|
||||
case useSearch:
|
||||
return "deepseek-chat-search", true
|
||||
case strings.HasPrefix(model, "claude-1."):
|
||||
return true
|
||||
case strings.HasPrefix(model, "claude-2."):
|
||||
return true
|
||||
case strings.HasPrefix(model, "claude-instant-"):
|
||||
return true
|
||||
case strings.HasPrefix(model, "gpt-3.5"):
|
||||
return true
|
||||
default:
|
||||
return "deepseek-chat", true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -238,3 +275,98 @@ func ClaudeModelsResponse() map[string]any {
|
||||
resp["has_more"] = false
|
||||
return resp
|
||||
}
|
||||
|
||||
func appendNoThinkingVariants(models []ModelInfo) []ModelInfo {
|
||||
out := make([]ModelInfo, 0, len(models)*2)
|
||||
for _, model := range models {
|
||||
out = append(out, model)
|
||||
variant := model
|
||||
variant.ID = withNoThinkingVariant(model.ID, true)
|
||||
out = append(out, variant)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func splitNoThinkingModel(model string) (string, bool) {
|
||||
model = lower(strings.TrimSpace(model))
|
||||
if strings.HasSuffix(model, noThinkingModelSuffix) {
|
||||
return strings.TrimSuffix(model, noThinkingModelSuffix), true
|
||||
}
|
||||
return model, false
|
||||
}
|
||||
|
||||
func withNoThinkingVariant(model string, enabled bool) string {
|
||||
baseModel, _ := splitNoThinkingModel(model)
|
||||
if !enabled {
|
||||
return baseModel
|
||||
}
|
||||
if baseModel == "" {
|
||||
return ""
|
||||
}
|
||||
return baseModel + noThinkingModelSuffix
|
||||
}
|
||||
|
||||
func loadModelAliases(store ModelAliasReader) map[string]string {
|
||||
aliases := DefaultModelAliases()
|
||||
if store != nil {
|
||||
for k, v := range store.ModelAliases() {
|
||||
aliases[lower(strings.TrimSpace(k))] = lower(strings.TrimSpace(v))
|
||||
}
|
||||
}
|
||||
return aliases
|
||||
}
|
||||
|
||||
func resolveCanonicalModel(aliases map[string]string, model string) (string, bool) {
|
||||
model = lower(strings.TrimSpace(model))
|
||||
if model == "" {
|
||||
return "", false
|
||||
}
|
||||
if isRetiredHistoricalModel(model) {
|
||||
return "", false
|
||||
}
|
||||
if IsSupportedDeepSeekModel(model) {
|
||||
return model, true
|
||||
}
|
||||
if mapped, ok := aliases[model]; ok && IsSupportedDeepSeekModel(mapped) {
|
||||
return mapped, true
|
||||
}
|
||||
if strings.HasPrefix(model, "deepseek-") {
|
||||
return "", false
|
||||
}
|
||||
|
||||
knownFamily := false
|
||||
for _, prefix := range []string{
|
||||
"gpt-", "o1", "o3", "claude-", "gemini-", "llama-", "qwen-", "mistral-", "command-",
|
||||
} {
|
||||
if strings.HasPrefix(model, prefix) {
|
||||
knownFamily = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !knownFamily {
|
||||
return "", false
|
||||
}
|
||||
|
||||
useVision := strings.Contains(model, "vision")
|
||||
useReasoner := strings.Contains(model, "reason") ||
|
||||
strings.Contains(model, "reasoner") ||
|
||||
strings.HasPrefix(model, "o1") ||
|
||||
strings.HasPrefix(model, "o3") ||
|
||||
strings.Contains(model, "opus") ||
|
||||
strings.Contains(model, "slow") ||
|
||||
strings.Contains(model, "r1")
|
||||
useSearch := strings.Contains(model, "search")
|
||||
|
||||
switch {
|
||||
case useVision:
|
||||
return "deepseek-v4-vision", true
|
||||
case useReasoner && useSearch:
|
||||
return "deepseek-v4-pro-search", true
|
||||
case useReasoner:
|
||||
return "deepseek-v4-pro", true
|
||||
case useSearch:
|
||||
return "deepseek-v4-flash-search", true
|
||||
default:
|
||||
return "deepseek-v4-flash", true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,9 +30,29 @@ func ResolvePath(envKey, defaultRel string) string {
|
||||
}
|
||||
|
||||
func ConfigPath() string {
|
||||
if strings.TrimSpace(os.Getenv("DS2API_CONFIG_PATH")) == "" && BaseDir() == "/app" {
|
||||
return containerDefaultConfigPath()
|
||||
}
|
||||
return ResolvePath("DS2API_CONFIG_PATH", "config.json")
|
||||
}
|
||||
|
||||
func containerDefaultConfigPath() string {
|
||||
// Container images run as non-root by default. Only use /data when mounted/provisioned.
|
||||
// Otherwise keep /app/config.json so admin-side save does not fail on MkdirAll("/data").
|
||||
if st, err := os.Stat("/data"); err == nil && st.IsDir() {
|
||||
return "/data/config.json"
|
||||
}
|
||||
return "/app/config.json"
|
||||
}
|
||||
|
||||
func legacyContainerConfigPath() string {
|
||||
return "/app/config.json"
|
||||
}
|
||||
|
||||
func shouldTryLegacyContainerConfigPath() bool {
|
||||
return strings.TrimSpace(os.Getenv("DS2API_CONFIG_PATH")) == "" && BaseDir() == "/app"
|
||||
}
|
||||
|
||||
func RawStreamSampleRoot() string {
|
||||
return ResolvePath("DS2API_RAW_STREAM_SAMPLE_ROOT", "tests/raw_stream_samples")
|
||||
}
|
||||
|
||||
28
internal/config/paths_test.go
Normal file
28
internal/config/paths_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestContainerDefaultConfigPath(t *testing.T) {
|
||||
t.Run("fallback to /app when /data is missing", func(t *testing.T) {
|
||||
// This test environment does not guarantee a writable/mounted /data.
|
||||
// If /data is absent we must keep /app fallback to avoid persistence failures.
|
||||
if _, err := os.Stat("/data"); err == nil {
|
||||
t.Skip("/data exists in this environment; cannot validate missing-/data fallback")
|
||||
}
|
||||
if got := containerDefaultConfigPath(); got != "/app/config.json" {
|
||||
t.Fatalf("containerDefaultConfigPath() = %q, want %q", got, "/app/config.json")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("prefer /data when /data directory exists", func(t *testing.T) {
|
||||
if _, err := os.Stat("/data"); err != nil {
|
||||
t.Skip("/data does not exist in this environment")
|
||||
}
|
||||
if got := containerDefaultConfigPath(); got != "/data/config.json" {
|
||||
t.Fatalf("containerDefaultConfigPath() = %q, want %q", got, "/data/config.json")
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -52,11 +52,12 @@ func loadStore() (*Store, error) {
|
||||
|
||||
func loadConfig() (Config, bool, error) {
|
||||
rawCfg := strings.TrimSpace(os.Getenv("DS2API_CONFIG_JSON"))
|
||||
path := ConfigPath()
|
||||
if rawCfg != "" {
|
||||
cfg, err := parseConfigString(rawCfg)
|
||||
if err != nil {
|
||||
if !IsVercel() && envWritebackEnabled() {
|
||||
if fileCfg, fileErr := loadConfigFromFile(ConfigPath()); fileErr == nil {
|
||||
if fileCfg, fileErr := loadConfigFromFile(path); fileErr == nil {
|
||||
return fileCfg, false, nil
|
||||
}
|
||||
}
|
||||
@@ -67,7 +68,7 @@ func loadConfig() (Config, bool, error) {
|
||||
if IsVercel() || !envWritebackEnabled() {
|
||||
return cfg, true, err
|
||||
}
|
||||
content, fileErr := os.ReadFile(ConfigPath())
|
||||
content, fileErr := os.ReadFile(path)
|
||||
if fileErr == nil {
|
||||
var fileCfg Config
|
||||
if unmarshalErr := json.Unmarshal(content, &fileCfg); unmarshalErr == nil {
|
||||
@@ -79,7 +80,7 @@ func loadConfig() (Config, bool, error) {
|
||||
if validateErr := ValidateConfig(cfg); validateErr != nil {
|
||||
return cfg, true, validateErr
|
||||
}
|
||||
if writeErr := writeConfigFile(ConfigPath(), cfg.Clone()); writeErr == nil {
|
||||
if writeErr := writeConfigFile(path, cfg.Clone()); writeErr == nil {
|
||||
return cfg, false, err
|
||||
} else {
|
||||
Logger.Warn("[config] env writeback bootstrap failed", "error", writeErr)
|
||||
@@ -87,14 +88,23 @@ func loadConfig() (Config, bool, error) {
|
||||
}
|
||||
return cfg, true, err
|
||||
}
|
||||
|
||||
cfg, err := loadConfigFromFile(ConfigPath())
|
||||
cfg, err := loadConfigFromFile(path)
|
||||
if err != nil {
|
||||
if shouldTryLegacyContainerConfigPath() {
|
||||
legacyPath := legacyContainerConfigPath()
|
||||
if legacyCfg, legacyErr := loadConfigFromFile(legacyPath); legacyErr == nil {
|
||||
Logger.Info("[config] loaded legacy container config path", "path", legacyPath)
|
||||
return legacyCfg, false, nil
|
||||
}
|
||||
}
|
||||
if IsVercel() {
|
||||
// Vercel one-click deploy may start without a writable/present config file.
|
||||
// Keep an in-memory config so users can bootstrap via WebUI then sync env.
|
||||
// Vercel may start without writable/present config; keep in-memory bootstrap config.
|
||||
return Config{}, true, nil
|
||||
}
|
||||
if shouldBootstrapMissingConfigFile(err) {
|
||||
Logger.Warn("[config] config file missing; starting with empty file-backed config", "path", path)
|
||||
return Config{}, false, nil
|
||||
}
|
||||
return Config{}, false, err
|
||||
}
|
||||
if IsVercel() {
|
||||
@@ -104,6 +114,10 @@ func loadConfig() (Config, bool, error) {
|
||||
return cfg, false, nil
|
||||
}
|
||||
|
||||
func shouldBootstrapMissingConfigFile(err error) bool {
|
||||
return errors.Is(err, os.ErrNotExist) && strings.TrimSpace(os.Getenv("DS2API_CONFIG_PATH")) != ""
|
||||
}
|
||||
|
||||
func loadConfigFromFile(path string) (Config, error) {
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
|
||||
@@ -6,18 +6,6 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (s *Store) ClaudeMapping() map[string]string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if len(s.cfg.ClaudeModelMap) > 0 {
|
||||
return cloneStringMap(s.cfg.ClaudeModelMap)
|
||||
}
|
||||
if len(s.cfg.ClaudeMapping) > 0 {
|
||||
return cloneStringMap(s.cfg.ClaudeMapping)
|
||||
}
|
||||
return map[string]string{"fast": "deepseek-chat", "slow": "deepseek-reasoner"}
|
||||
}
|
||||
|
||||
func (s *Store) ModelAliases() map[string]string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
@@ -33,24 +21,6 @@ func (s *Store) ModelAliases() map[string]string {
|
||||
return out
|
||||
}
|
||||
|
||||
func (s *Store) CompatWideInputStrictOutput() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.cfg.Compat.WideInputStrictOutput == nil {
|
||||
return true
|
||||
}
|
||||
return *s.cfg.Compat.WideInputStrictOutput
|
||||
}
|
||||
|
||||
func (s *Store) CompatStripReferenceMarkers() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.cfg.Compat.StripReferenceMarkers == nil {
|
||||
return true
|
||||
}
|
||||
return *s.cfg.Compat.StripReferenceMarkers
|
||||
}
|
||||
|
||||
func (s *Store) ToolcallMode() string {
|
||||
return "feature_match"
|
||||
}
|
||||
@@ -175,20 +145,32 @@ func (s *Store) AutoDeleteSessions() bool {
|
||||
return s.AutoDeleteMode() != "none"
|
||||
}
|
||||
|
||||
func (s *Store) HistorySplitEnabled() bool {
|
||||
func (s *Store) CurrentInputFileEnabled() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.cfg.HistorySplit.Enabled == nil {
|
||||
if s.cfg.CurrentInputFile.Enabled == nil {
|
||||
return true
|
||||
}
|
||||
return *s.cfg.HistorySplit.Enabled
|
||||
return *s.cfg.CurrentInputFile.Enabled
|
||||
}
|
||||
|
||||
func (s *Store) HistorySplitTriggerAfterTurns() int {
|
||||
func (s *Store) CurrentInputFileMinChars() int {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.cfg.HistorySplit.TriggerAfterTurns == nil || *s.cfg.HistorySplit.TriggerAfterTurns <= 0 {
|
||||
return 1
|
||||
}
|
||||
return *s.cfg.HistorySplit.TriggerAfterTurns
|
||||
return s.cfg.CurrentInputFile.MinChars
|
||||
}
|
||||
|
||||
func (s *Store) ThinkingInjectionEnabled() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.cfg.ThinkingInjection.Enabled == nil {
|
||||
return true
|
||||
}
|
||||
return *s.cfg.ThinkingInjection.Enabled
|
||||
}
|
||||
|
||||
func (s *Store) ThinkingInjectionPrompt() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return strings.TrimSpace(s.cfg.ThinkingInjection.Prompt)
|
||||
}
|
||||
|
||||
@@ -2,26 +2,45 @@ package config
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestStoreHistorySplitAccessors(t *testing.T) {
|
||||
func TestStoreCurrentInputFileAccessors(t *testing.T) {
|
||||
store := &Store{cfg: Config{}}
|
||||
if !store.HistorySplitEnabled() {
|
||||
t.Fatal("expected history split enabled by default")
|
||||
if !store.CurrentInputFileEnabled() {
|
||||
t.Fatal("expected current input file enabled by default")
|
||||
}
|
||||
if got := store.HistorySplitTriggerAfterTurns(); got != 1 {
|
||||
t.Fatalf("default history split trigger_after_turns=%d want=1", got)
|
||||
if got := store.CurrentInputFileMinChars(); got != 0 {
|
||||
t.Fatalf("default current input file min_chars=%d want=0", got)
|
||||
}
|
||||
|
||||
enabled := false
|
||||
turns := 3
|
||||
store.cfg.HistorySplit = HistorySplitConfig{
|
||||
Enabled: &enabled,
|
||||
TriggerAfterTurns: &turns,
|
||||
store.cfg.CurrentInputFile = CurrentInputFileConfig{Enabled: &enabled, MinChars: 12345}
|
||||
if store.CurrentInputFileEnabled() {
|
||||
t.Fatal("expected current input file disabled")
|
||||
}
|
||||
|
||||
if store.HistorySplitEnabled() {
|
||||
t.Fatal("expected history split disabled after override")
|
||||
enabled = true
|
||||
store.cfg.CurrentInputFile.Enabled = &enabled
|
||||
if !store.CurrentInputFileEnabled() {
|
||||
t.Fatal("expected current input file enabled")
|
||||
}
|
||||
if got := store.HistorySplitTriggerAfterTurns(); got != 3 {
|
||||
t.Fatalf("history split trigger_after_turns=%d want=3", got)
|
||||
if got := store.CurrentInputFileMinChars(); got != 12345 {
|
||||
t.Fatalf("current input file min_chars=%d want=12345", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreThinkingInjectionAccessors(t *testing.T) {
|
||||
store := &Store{cfg: Config{}}
|
||||
if !store.ThinkingInjectionEnabled() {
|
||||
t.Fatal("expected thinking injection enabled by default")
|
||||
}
|
||||
|
||||
disabled := false
|
||||
store.cfg.ThinkingInjection.Enabled = &disabled
|
||||
if store.ThinkingInjectionEnabled() {
|
||||
t.Fatal("expected thinking injection disabled by explicit config")
|
||||
}
|
||||
|
||||
store.cfg.ThinkingInjection.Prompt = " custom thinking prompt "
|
||||
if got := store.ThinkingInjectionPrompt(); got != "custom thinking prompt" {
|
||||
t.Fatalf("thinking injection prompt=%q want custom thinking prompt", got)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ func ValidateConfig(c Config) error {
|
||||
if err := ValidateAutoDeleteConfig(c.AutoDelete); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ValidateHistorySplitConfig(c.HistorySplit); err != nil {
|
||||
if err := ValidateCurrentInputFileConfig(c.CurrentInputFile); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ValidateAccountProxyReferences(c.Accounts, c.Proxies); err != nil {
|
||||
@@ -114,11 +114,9 @@ func ValidateAutoDeleteConfig(autoDelete AutoDeleteConfig) error {
|
||||
return ValidateAutoDeleteMode(autoDelete.Mode)
|
||||
}
|
||||
|
||||
func ValidateHistorySplitConfig(historySplit HistorySplitConfig) error {
|
||||
if historySplit.TriggerAfterTurns != nil {
|
||||
if err := ValidateIntRange("history_split.trigger_after_turns", *historySplit.TriggerAfterTurns, 1, 1000, true); err != nil {
|
||||
return err
|
||||
}
|
||||
func ValidateCurrentInputFileConfig(currentInputFile CurrentInputFileConfig) error {
|
||||
if currentInputFile.MinChars != 0 {
|
||||
return ValidateIntRange("current_input_file.min_chars", currentInputFile.MinChars, 1, 100000000, true)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -40,11 +40,9 @@ func TestValidateConfigRejectsInvalidValues(t *testing.T) {
|
||||
want: "auto_delete.mode",
|
||||
},
|
||||
{
|
||||
name: "history split",
|
||||
cfg: Config{HistorySplit: HistorySplitConfig{
|
||||
TriggerAfterTurns: intPtr(0),
|
||||
}},
|
||||
want: "history_split.trigger_after_turns",
|
||||
name: "current input file",
|
||||
cfg: Config{CurrentInputFile: CurrentInputFileConfig{MinChars: -1}},
|
||||
want: "current_input_file.min_chars",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -66,5 +64,3 @@ func TestValidateConfigAcceptsLegacyAutoDeleteSessions(t *testing.T) {
|
||||
t.Fatalf("expected legacy auto_delete.sessions config to remain valid, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func intPtr(v int) *int { return &v }
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -28,7 +29,7 @@ func (c *Client) Login(ctx context.Context, acc config.Account) (string, error)
|
||||
} else {
|
||||
return "", errors.New("missing email/mobile")
|
||||
}
|
||||
resp, err := c.postJSON(ctx, clients.regular, clients.fallback, DeepSeekLoginURL, BaseHeaders, payload)
|
||||
resp, err := c.postJSON(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekLoginURL, dsprotocol.BaseHeaders, payload)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -58,7 +59,7 @@ func (c *Client) CreateSession(ctx context.Context, a *auth.RequestAuth, maxAtte
|
||||
refreshed := false
|
||||
for attempts < maxAttempts {
|
||||
headers := c.authHeaders(a.DeepSeekToken)
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, DeepSeekCreateSessionURL, headers, map[string]any{"agent": "chat"})
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekCreateSessionURL, headers, map[string]any{"agent": "chat"})
|
||||
if err != nil {
|
||||
config.Logger.Warn("[create_session] request error", "error", err, "account", a.AccountID)
|
||||
attempts++
|
||||
@@ -91,7 +92,7 @@ func (c *Client) CreateSession(ctx context.Context, a *auth.RequestAuth, maxAtte
|
||||
}
|
||||
|
||||
func (c *Client) GetPow(ctx context.Context, a *auth.RequestAuth, maxAttempts int) (string, error) {
|
||||
return c.GetPowForTarget(ctx, a, DeepSeekCompletionTargetPath, maxAttempts)
|
||||
return c.GetPowForTarget(ctx, a, dsprotocol.DeepSeekCompletionTargetPath, maxAttempts)
|
||||
}
|
||||
|
||||
func (c *Client) GetPowForTarget(ctx context.Context, a *auth.RequestAuth, targetPath string, maxAttempts int) (string, error) {
|
||||
@@ -100,16 +101,20 @@ func (c *Client) GetPowForTarget(ctx context.Context, a *auth.RequestAuth, targe
|
||||
}
|
||||
targetPath = strings.TrimSpace(targetPath)
|
||||
if targetPath == "" {
|
||||
targetPath = DeepSeekCompletionTargetPath
|
||||
targetPath = dsprotocol.DeepSeekCompletionTargetPath
|
||||
}
|
||||
clients := c.requestClientsForAuth(ctx, a)
|
||||
attempts := 0
|
||||
refreshed := false
|
||||
lastFailureKind := FailureUnknown
|
||||
lastFailureMessage := ""
|
||||
for attempts < maxAttempts {
|
||||
headers := c.authHeaders(a.DeepSeekToken)
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, DeepSeekCreatePowURL, headers, map[string]any{"target_path": targetPath})
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekCreatePowURL, headers, map[string]any{"target_path": targetPath})
|
||||
if err != nil {
|
||||
config.Logger.Warn("[get_pow] request error", "error", err, "account", a.AccountID, "target_path", targetPath)
|
||||
lastFailureKind = FailureUnknown
|
||||
lastFailureMessage = err.Error()
|
||||
attempts++
|
||||
continue
|
||||
}
|
||||
@@ -126,6 +131,12 @@ func (c *Client) GetPowForTarget(ctx context.Context, a *auth.RequestAuth, targe
|
||||
return BuildPowHeader(challenge, answer)
|
||||
}
|
||||
config.Logger.Warn("[get_pow] failed", "status", status, "code", code, "biz_code", bizCode, "msg", msg, "biz_msg", bizMsg, "use_config_token", a.UseConfigToken, "account", a.AccountID, "target_path", targetPath)
|
||||
lastFailureMessage = failureMessage(msg, bizMsg, "get pow failed")
|
||||
if isTokenInvalid(status, code, bizCode, msg, bizMsg) || isAuthIndicativeBizFailure(msg, bizMsg) {
|
||||
lastFailureKind = authFailureKind(a.UseConfigToken)
|
||||
} else {
|
||||
lastFailureKind = FailureUnknown
|
||||
}
|
||||
if a.UseConfigToken {
|
||||
if !refreshed && shouldAttemptRefresh(status, code, bizCode, msg, bizMsg) {
|
||||
if c.Auth.RefreshToken(ctx, a) {
|
||||
@@ -141,12 +152,15 @@ func (c *Client) GetPowForTarget(ctx context.Context, a *auth.RequestAuth, targe
|
||||
}
|
||||
attempts++
|
||||
}
|
||||
if lastFailureKind != FailureUnknown {
|
||||
return "", &RequestFailure{Op: "get pow", Kind: lastFailureKind, Message: lastFailureMessage}
|
||||
}
|
||||
return "", errors.New("get pow failed")
|
||||
}
|
||||
|
||||
func (c *Client) authHeaders(token string) map[string]string {
|
||||
headers := make(map[string]string, len(BaseHeaders)+1)
|
||||
for k, v := range BaseHeaders {
|
||||
headers := make(map[string]string, len(dsprotocol.BaseHeaders)+1)
|
||||
for k, v := range dsprotocol.BaseHeaders {
|
||||
headers[k] = v
|
||||
}
|
||||
headers["authorization"] = "Bearer " + token
|
||||
@@ -210,6 +224,23 @@ func isAuthIndicativeBizFailure(msg string, bizMsg string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func authFailureKind(useConfigToken bool) FailureKind {
|
||||
if useConfigToken {
|
||||
return FailureManagedUnauthorized
|
||||
}
|
||||
return FailureDirectUnauthorized
|
||||
}
|
||||
|
||||
func failureMessage(msg string, bizMsg string, fallback string) string {
|
||||
if trimmed := strings.TrimSpace(bizMsg); trimmed != "" {
|
||||
return trimmed
|
||||
}
|
||||
if trimmed := strings.TrimSpace(msg); trimmed != "" {
|
||||
return trimmed
|
||||
}
|
||||
return strings.TrimSpace(fallback)
|
||||
}
|
||||
|
||||
// DeepSeek has returned create-session ids in both biz_data.id and
|
||||
// biz_data.chat_session.id across observed response variants; accept either.
|
||||
func extractCreateSessionID(resp map[string]any) string {
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import "testing"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import "testing"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import "testing"
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
@@ -20,10 +21,10 @@ func (c *Client) CallCompletion(ctx context.Context, a *auth.RequestAuth, payloa
|
||||
clients := c.requestClientsForAuth(ctx, a)
|
||||
headers := c.authHeaders(a.DeepSeekToken)
|
||||
headers["x-ds-pow-response"] = powResp
|
||||
captureSession := c.capture.Start("deepseek_completion", DeepSeekCompletionURL, a.AccountID, payload)
|
||||
captureSession := c.capture.Start("deepseek_completion", dsprotocol.DeepSeekCompletionURL, a.AccountID, payload)
|
||||
attempts := 0
|
||||
for attempts < maxAttempts {
|
||||
resp, err := c.streamPost(ctx, clients.stream, DeepSeekCompletionURL, headers, payload)
|
||||
resp, err := c.streamPost(ctx, clients.stream, dsprotocol.DeepSeekCompletionURL, headers, payload)
|
||||
if err != nil {
|
||||
attempts++
|
||||
time.Sleep(time.Second)
|
||||
@@ -1,11 +1,13 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -26,7 +28,7 @@ type continueState struct {
|
||||
}
|
||||
|
||||
// wrapCompletionWithAutoContinue wraps the completion response body so that
|
||||
// if the upstream indicates the response is incomplete (WIP / INCOMPLETE /
|
||||
// if the upstream indicates the response is incomplete (INCOMPLETE /
|
||||
// AUTO_CONTINUE), ds2api will automatically call the DeepSeek continue
|
||||
// endpoint and splice the continuation SSE stream onto the original.
|
||||
// The caller sees a single, seamless SSE stream.
|
||||
@@ -60,8 +62,8 @@ func (c *Client) callContinue(ctx context.Context, a *auth.RequestAuth, sessionI
|
||||
"fallback_to_resume": true,
|
||||
}
|
||||
config.Logger.Info("[auto_continue] calling continue", "session_id", sessionID, "message_id", responseMessageID)
|
||||
captureSession := c.capture.Start("deepseek_continue", DeepSeekContinueURL, a.AccountID, payload)
|
||||
resp, err := c.streamPost(ctx, clients.stream, DeepSeekContinueURL, headers, payload)
|
||||
captureSession := c.capture.Start("deepseek_continue", dsprotocol.DeepSeekContinueURL, a.AccountID, payload)
|
||||
resp, err := c.streamPost(ctx, clients.stream, dsprotocol.DeepSeekContinueURL, headers, payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -131,33 +133,51 @@ func pumpAutoContinue(ctx context.Context, pw *io.PipeWriter, initial io.ReadClo
|
||||
// sentinels are consumed (not forwarded) so that the downstream only sees
|
||||
// one final [DONE] at the very end.
|
||||
func streamBodyWithContinueState(ctx context.Context, pw *io.PipeWriter, body io.Reader, state *continueState) (bool, error) {
|
||||
scanner := bufio.NewScanner(body)
|
||||
scanner.Buffer(make([]byte, 0, 64*1024), 2*1024*1024)
|
||||
reader := bufio.NewReaderSize(body, 64*1024)
|
||||
hadDone := false
|
||||
for scanner.Scan() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return hadDone, ctx.Err()
|
||||
default:
|
||||
}
|
||||
line := append([]byte{}, scanner.Bytes()...)
|
||||
trimmed := strings.TrimSpace(string(line))
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(trimmed, "data:") {
|
||||
data := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:"))
|
||||
if data == "[DONE]" {
|
||||
hadDone = true
|
||||
continue
|
||||
line, err := reader.ReadBytes('\n')
|
||||
if len(line) == 0 && err != nil {
|
||||
if err == io.EOF {
|
||||
return hadDone, nil
|
||||
}
|
||||
state.observe(data)
|
||||
return hadDone, err
|
||||
}
|
||||
if _, err := io.Copy(pw, bytes.NewReader(append(line, '\n'))); err != nil {
|
||||
trimmed := strings.TrimSpace(string(line))
|
||||
if trimmed != "" {
|
||||
if strings.HasPrefix(trimmed, "data:") {
|
||||
data := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:"))
|
||||
if data == "[DONE]" {
|
||||
hadDone = true
|
||||
if err != nil && err != io.EOF {
|
||||
return hadDone, err
|
||||
}
|
||||
if err == io.EOF {
|
||||
return hadDone, nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
state.observe(data)
|
||||
}
|
||||
if !strings.HasSuffix(string(line), "\n") {
|
||||
line = append(line, '\n')
|
||||
}
|
||||
if _, copyErr := io.Copy(pw, bytes.NewReader(line)); copyErr != nil {
|
||||
return hadDone, copyErr
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return hadDone, nil
|
||||
}
|
||||
return hadDone, err
|
||||
}
|
||||
}
|
||||
return hadDone, scanner.Err()
|
||||
}
|
||||
|
||||
// observe extracts continue-relevant signals from an SSE JSON chunk.
|
||||
@@ -173,49 +193,100 @@ func (s *continueState) observe(data string) {
|
||||
if id := intFrom(chunk["response_message_id"]); id > 0 {
|
||||
s.responseMessageID = id
|
||||
}
|
||||
// Path-based status: {"p": "response/status", "v": "FINISHED"}
|
||||
if p, _ := chunk["p"].(string); p == "response/status" {
|
||||
if status, _ := chunk["v"].(string); status != "" {
|
||||
s.lastStatus = strings.TrimSpace(status)
|
||||
if strings.EqualFold(s.lastStatus, "FINISHED") {
|
||||
s.finished = true
|
||||
}
|
||||
}
|
||||
s.observeDirectPatch(asString(chunk["p"]), chunk["v"])
|
||||
if p, _ := chunk["p"].(string); p == "response" {
|
||||
s.observeBatchPatches("response", chunk["v"])
|
||||
} else {
|
||||
s.observeBatchPatches("", chunk["v"])
|
||||
}
|
||||
// Nested v.response
|
||||
v, _ := chunk["v"].(map[string]any)
|
||||
if response, _ := v["response"].(map[string]any); response != nil {
|
||||
if id := intFrom(response["message_id"]); id > 0 {
|
||||
s.responseMessageID = id
|
||||
}
|
||||
if status, _ := response["status"].(string); status != "" {
|
||||
s.lastStatus = strings.TrimSpace(status)
|
||||
if strings.EqualFold(s.lastStatus, "FINISHED") {
|
||||
s.finished = true
|
||||
}
|
||||
}
|
||||
if autoContinue, ok := response["auto_continue"].(bool); ok && autoContinue {
|
||||
if v, _ := chunk["v"].(map[string]any); v != nil {
|
||||
s.observeResponseObject(v["response"])
|
||||
}
|
||||
if message, _ := chunk["message"].(map[string]any); message != nil {
|
||||
s.observeResponseObject(message["response"])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *continueState) observeDirectPatch(path string, value any) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
switch strings.Trim(strings.TrimSpace(path), "/") {
|
||||
case "response/status", "status", "response/quasi_status", "quasi_status":
|
||||
s.setStatus(asString(value))
|
||||
case "response/auto_continue", "auto_continue":
|
||||
if v, ok := value.(bool); ok && v {
|
||||
s.lastStatus = "AUTO_CONTINUE"
|
||||
}
|
||||
}
|
||||
// Nested message.response
|
||||
if message, _ := chunk["message"].(map[string]any); message != nil {
|
||||
if response, _ := message["response"].(map[string]any); response != nil {
|
||||
if id := intFrom(response["message_id"]); id > 0 {
|
||||
s.responseMessageID = id
|
||||
}
|
||||
if status, _ := response["status"].(string); status != "" {
|
||||
s.lastStatus = strings.TrimSpace(status)
|
||||
if strings.EqualFold(s.lastStatus, "FINISHED") {
|
||||
s.finished = true
|
||||
}
|
||||
}
|
||||
|
||||
func (s *continueState) observeResponseObject(raw any) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
response, _ := raw.(map[string]any)
|
||||
if response == nil {
|
||||
return
|
||||
}
|
||||
if id := intFrom(response["message_id"]); id > 0 {
|
||||
s.responseMessageID = id
|
||||
}
|
||||
s.setStatus(asString(response["status"]))
|
||||
if autoContinue, ok := response["auto_continue"].(bool); ok && autoContinue {
|
||||
s.lastStatus = "AUTO_CONTINUE"
|
||||
}
|
||||
}
|
||||
|
||||
func (s *continueState) observeBatchPatches(parentPath string, raw any) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
patches, ok := raw.([]any)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for _, patch := range patches {
|
||||
m, ok := patch.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
path := strings.TrimSpace(asString(m["p"]))
|
||||
if path == "" {
|
||||
continue
|
||||
}
|
||||
fullPath := path
|
||||
if parent := strings.Trim(strings.TrimSpace(parentPath), "/"); parent != "" && !strings.Contains(path, "/") {
|
||||
fullPath = parent + "/" + path
|
||||
}
|
||||
switch strings.Trim(strings.TrimSpace(fullPath), "/") {
|
||||
case "response/status", "status", "response/quasi_status", "quasi_status":
|
||||
s.setStatus(asString(m["v"]))
|
||||
case "response/auto_continue", "auto_continue":
|
||||
if v, ok := m["v"].(bool); ok && v {
|
||||
s.lastStatus = "AUTO_CONTINUE"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// shouldContinue returns true when the upstream indicates the response is
|
||||
// not yet finished and we have enough information to issue a continue request.
|
||||
func (s *continueState) setStatus(status string) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
normalized := strings.TrimSpace(status)
|
||||
if normalized == "" {
|
||||
return
|
||||
}
|
||||
s.lastStatus = normalized
|
||||
if strings.EqualFold(normalized, "FINISHED") || strings.EqualFold(normalized, "CONTENT_FILTER") {
|
||||
s.finished = true
|
||||
}
|
||||
}
|
||||
|
||||
// shouldContinue returns true when the upstream explicitly indicates the
|
||||
// response is incomplete and we have enough information to issue a continue
|
||||
// request. Plain WIP is not sufficient because normal streams begin in WIP.
|
||||
func (s *continueState) shouldContinue() bool {
|
||||
if s == nil {
|
||||
return false
|
||||
@@ -224,7 +295,7 @@ func (s *continueState) shouldContinue() bool {
|
||||
return false
|
||||
}
|
||||
switch strings.ToUpper(strings.TrimSpace(s.lastStatus)) {
|
||||
case "WIP", "INCOMPLETE", "AUTO_CONTINUE":
|
||||
case "INCOMPLETE", "AUTO_CONTINUE":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
@@ -240,3 +311,19 @@ func (s *continueState) prepareForNextRound() {
|
||||
s.finished = false
|
||||
s.lastStatus = ""
|
||||
}
|
||||
|
||||
func asString(v any) string {
|
||||
if v == nil {
|
||||
return ""
|
||||
}
|
||||
switch x := v.(type) {
|
||||
case string:
|
||||
return x
|
||||
default:
|
||||
s := strings.TrimSpace(strings.ReplaceAll(strings.TrimSpace(fmt.Sprint(v)), "\u0000", ""))
|
||||
if s == "<nil>" {
|
||||
return ""
|
||||
}
|
||||
return s
|
||||
}
|
||||
}
|
||||
307
internal/deepseek/client/client_continue_test.go
Normal file
307
internal/deepseek/client/client_continue_test.go
Normal file
@@ -0,0 +1,307 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"ds2api/internal/auth"
|
||||
)
|
||||
|
||||
type failingDoer struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (d failingDoer) Do(_ *http.Request) (*http.Response, error) {
|
||||
return nil, d.err
|
||||
}
|
||||
|
||||
type roundTripperFunc func(*http.Request) (*http.Response, error)
|
||||
|
||||
func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
return f(req)
|
||||
}
|
||||
|
||||
func TestCallContinuePropagatesPowHeaderToFallbackRequest(t *testing.T) {
|
||||
var seenPow string
|
||||
var seenURL string
|
||||
|
||||
client := &Client{
|
||||
stream: failingDoer{err: errors.New("stream transport failed")},
|
||||
fallbackS: &http.Client{
|
||||
Transport: roundTripperFunc(func(req *http.Request) (*http.Response, error) {
|
||||
seenPow = req.Header.Get("x-ds-pow-response")
|
||||
seenURL = req.URL.String()
|
||||
body := io.NopCloser(strings.NewReader("data: {\"p\":\"response/content\",\"v\":\"continued\"}\n" + "data: [DONE]\n"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: body,
|
||||
Request: req,
|
||||
}, nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := client.callContinue(context.Background(), &auth.RequestAuth{
|
||||
DeepSeekToken: "token",
|
||||
AccountID: "acct",
|
||||
}, "session-123", 99, "pow-response-abc")
|
||||
if err != nil {
|
||||
t.Fatalf("callContinue returned error: %v", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
if seenPow != "pow-response-abc" {
|
||||
t.Fatalf("continue request pow header=%q want=%q", seenPow, "pow-response-abc")
|
||||
}
|
||||
if seenURL != dsprotocol.DeepSeekContinueURL {
|
||||
t.Fatalf("continue request url=%q want=%q", seenURL, dsprotocol.DeepSeekContinueURL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallCompletionAutoContinueThreadsPowHeader(t *testing.T) {
|
||||
var seenPow string
|
||||
var seenContinueURL string
|
||||
|
||||
initialBody := strings.Join([]string{
|
||||
`data: {"response_message_id":321,"v":{"response":{"message_id":321,"status":"WIP","auto_continue":true}}}`,
|
||||
`data: [DONE]`,
|
||||
}, "\n") + "\n"
|
||||
|
||||
client := &Client{
|
||||
stream: failingOrCompletionDoer{
|
||||
completionResp: &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader(initialBody)),
|
||||
},
|
||||
},
|
||||
fallbackS: &http.Client{
|
||||
Transport: roundTripperFunc(func(req *http.Request) (*http.Response, error) {
|
||||
seenPow = req.Header.Get("x-ds-pow-response")
|
||||
seenContinueURL = req.URL.String()
|
||||
body := io.NopCloser(strings.NewReader("data: {\"response_message_id\":322,\"v\":{\"response\":{\"message_id\":322,\"status\":\"FINISHED\"}}}\n" + "data: [DONE]\n"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: body,
|
||||
Request: req,
|
||||
}, nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := client.CallCompletion(context.Background(), &auth.RequestAuth{
|
||||
DeepSeekToken: "token",
|
||||
AccountID: "acct",
|
||||
}, map[string]any{
|
||||
"chat_session_id": "session-123",
|
||||
}, "pow-response-xyz", 1)
|
||||
if err != nil {
|
||||
t.Fatalf("CallCompletion returned error: %v", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
out, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("read auto-continued body failed: %v", err)
|
||||
}
|
||||
if seenPow != "pow-response-xyz" {
|
||||
t.Fatalf("threaded continue pow header=%q want=%q", seenPow, "pow-response-xyz")
|
||||
}
|
||||
if seenContinueURL != dsprotocol.DeepSeekContinueURL {
|
||||
t.Fatalf("continue url=%q want=%q", seenContinueURL, dsprotocol.DeepSeekContinueURL)
|
||||
}
|
||||
if !bytes.Contains(out, []byte(`"status":"WIP"`)) {
|
||||
t.Fatalf("expected initial stream content in body, got=%s", string(out))
|
||||
}
|
||||
if !bytes.Contains(out, []byte(`data: [DONE]`)) {
|
||||
t.Fatalf("expected final DONE sentinel in body, got=%s", string(out))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoContinueDoesNotTriggerOnPlainWIPWithoutExplicitContinuationSignal(t *testing.T) {
|
||||
initialBody := strings.Join([]string{
|
||||
`data: {"response_message_id":321,"v":{"response":{"message_id":321,"status":"WIP","auto_continue":false}}}`,
|
||||
`data: [DONE]`,
|
||||
}, "\n") + "\n"
|
||||
|
||||
var continueCalls atomic.Int32
|
||||
body := newAutoContinueBody(context.Background(), io.NopCloser(strings.NewReader(initialBody)), "session-123", 8, func(context.Context, string, int) (*http.Response, error) {
|
||||
continueCalls.Add(1)
|
||||
return nil, errors.New("continue should not have been called")
|
||||
})
|
||||
defer func() { _ = body.Close() }()
|
||||
|
||||
out, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("read body failed: %v", err)
|
||||
}
|
||||
if continueCalls.Load() != 0 {
|
||||
t.Fatalf("expected no continue calls, got %d", continueCalls.Load())
|
||||
}
|
||||
if !bytes.Contains(out, []byte(`"status":"WIP"`)) || !bytes.Contains(out, []byte(`data: [DONE]`)) {
|
||||
t.Fatalf("expected original body to pass through unchanged, got=%s", string(out))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoContinuePassesThroughLongSingleSSELine(t *testing.T) {
|
||||
payload := strings.Repeat("x", 2*1024*1024+4096)
|
||||
initialBody := `data: {"p":"response/content","v":"` + payload + `"}` + "\n" +
|
||||
`data: [DONE]` + "\n"
|
||||
|
||||
body := newAutoContinueBody(context.Background(), io.NopCloser(strings.NewReader(initialBody)), "session-123", 8, func(context.Context, string, int) (*http.Response, error) {
|
||||
return nil, errors.New("continue should not have been called")
|
||||
})
|
||||
defer func() { _ = body.Close() }()
|
||||
|
||||
out, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("read body failed: %v", err)
|
||||
}
|
||||
if !bytes.Contains(out, []byte(payload)) {
|
||||
t.Fatalf("expected long SSE payload to pass through, got len=%d want payload len=%d", len(out), len(payload))
|
||||
}
|
||||
if !bytes.Contains(out, []byte(`data: [DONE]`)) {
|
||||
t.Fatalf("expected final DONE sentinel in body, got len=%d", len(out))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoContinueTriggersOnDirectQuasiStatusIncomplete(t *testing.T) {
|
||||
initialBody := strings.Join([]string{
|
||||
`data: {"response_message_id":321,"p":"response/content","v":"<tool_calls><invoke name=\"write_file\"><parameter name=\"content\"><![CDATA[part-one"}`,
|
||||
`data: {"p":"response/quasi_status","v":"INCOMPLETE"}`,
|
||||
`data: [DONE]`,
|
||||
}, "\n") + "\n"
|
||||
|
||||
var continueCalls atomic.Int32
|
||||
body := newAutoContinueBody(context.Background(), io.NopCloser(strings.NewReader(initialBody)), "session-123", 8, func(context.Context, string, int) (*http.Response, error) {
|
||||
continueCalls.Add(1)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader(
|
||||
`data: {"response_message_id":322,"p":"response/content","v":"-part-two]]></parameter></invoke></tool_calls>"}` + "\n" +
|
||||
`data: {"p":"response/status","v":"FINISHED"}` + "\n" +
|
||||
`data: [DONE]` + "\n",
|
||||
)),
|
||||
}, nil
|
||||
})
|
||||
defer func() { _ = body.Close() }()
|
||||
|
||||
out, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("read body failed: %v", err)
|
||||
}
|
||||
if continueCalls.Load() != 1 {
|
||||
t.Fatalf("expected exactly one continue call, got %d", continueCalls.Load())
|
||||
}
|
||||
if !bytes.Contains(out, []byte("part-one")) || !bytes.Contains(out, []byte("-part-two")) {
|
||||
t.Fatalf("expected continued tool content in body, got=%s", string(out))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoContinueTriggersOnResponseBatchQuasiStatusIncomplete(t *testing.T) {
|
||||
initialBody := strings.Join([]string{
|
||||
`data: {"response_message_id":321,"v":{"response":{"message_id":321,"status":"WIP","auto_continue":false}}}`,
|
||||
`data: {"p":"response","o":"BATCH","v":[{"p":"accumulated_token_usage","v":2413},{"p":"quasi_status","v":"INCOMPLETE"}]}`,
|
||||
`data: [DONE]`,
|
||||
}, "\n") + "\n"
|
||||
|
||||
var continueCalls atomic.Int32
|
||||
body := newAutoContinueBody(context.Background(), io.NopCloser(strings.NewReader(initialBody)), "session-123", 8, func(context.Context, string, int) (*http.Response, error) {
|
||||
continueCalls.Add(1)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader(
|
||||
`data: {"response_message_id":322,"p":"response/status","v":"FINISHED"}` + "\n" +
|
||||
`data: [DONE]` + "\n",
|
||||
)),
|
||||
}, nil
|
||||
})
|
||||
defer func() { _ = body.Close() }()
|
||||
|
||||
out, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("read body failed: %v", err)
|
||||
}
|
||||
if continueCalls.Load() != 1 {
|
||||
t.Fatalf("expected exactly one continue call, got %d", continueCalls.Load())
|
||||
}
|
||||
if !bytes.Contains(out, []byte(`"quasi_status","v":"INCOMPLETE"`)) || !bytes.Contains(out, []byte(`"v":"FINISHED"`)) {
|
||||
t.Fatalf("expected continued output to include initial and final rounds, got=%s", string(out))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoContinueDoesNotTriggerWhenResponseBatchQuasiStatusFinished(t *testing.T) {
|
||||
initialBody := strings.Join([]string{
|
||||
`data: {"response_message_id":321,"v":{"response":{"message_id":321,"status":"WIP","auto_continue":false}}}`,
|
||||
`data: {"p":"response","o":"BATCH","v":[{"p":"accumulated_token_usage","v":2413},{"p":"quasi_status","v":"FINISHED"}]}`,
|
||||
`data: [DONE]`,
|
||||
}, "\n") + "\n"
|
||||
|
||||
var continueCalls atomic.Int32
|
||||
body := newAutoContinueBody(context.Background(), io.NopCloser(strings.NewReader(initialBody)), "session-123", 8, func(context.Context, string, int) (*http.Response, error) {
|
||||
continueCalls.Add(1)
|
||||
return nil, errors.New("continue should not have been called")
|
||||
})
|
||||
defer func() { _ = body.Close() }()
|
||||
|
||||
out, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("read body failed: %v", err)
|
||||
}
|
||||
if continueCalls.Load() != 0 {
|
||||
t.Fatalf("expected no continue calls, got %d", continueCalls.Load())
|
||||
}
|
||||
if !bytes.Contains(out, []byte(`"quasi_status","v":"FINISHED"`)) || !bytes.Contains(out, []byte(`data: [DONE]`)) {
|
||||
t.Fatalf("expected original finished body to pass through unchanged, got=%s", string(out))
|
||||
}
|
||||
}
|
||||
|
||||
type failingOrCompletionDoer struct {
|
||||
completionResp *http.Response
|
||||
}
|
||||
|
||||
func (d failingOrCompletionDoer) Do(req *http.Request) (*http.Response, error) {
|
||||
if strings.Contains(req.URL.Path, "/chat/completion") {
|
||||
return d.completionResp, nil
|
||||
}
|
||||
return nil, errors.New("forced stream failure")
|
||||
}
|
||||
|
||||
func TestAutoContinuePreservesIncompleteStateWhenNextChunkOmitsStatus(t *testing.T) {
|
||||
initialBody := strings.Join([]string{
|
||||
`data: {"response_message_id":321,"v":{"response":{"message_id":321,"status":"INCOMPLETE"}}}`,
|
||||
`data: {"p":"response/content","v":{"text":"continued"}}`,
|
||||
`data: [DONE]`,
|
||||
}, "\n") + "\n"
|
||||
|
||||
var continueCalls atomic.Int32
|
||||
body := newAutoContinueBody(context.Background(), io.NopCloser(strings.NewReader(initialBody)), "session-123", 8, func(context.Context, string, int) (*http.Response, error) {
|
||||
continueCalls.Add(1)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader(
|
||||
`data: {"response_message_id":322,"p":"response/status","v":"FINISHED"}` + "\n" +
|
||||
`data: [DONE]` + "\n",
|
||||
)),
|
||||
}, nil
|
||||
})
|
||||
defer func() { _ = body.Close() }()
|
||||
|
||||
_, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
t.Fatalf("read body failed: %v", err)
|
||||
}
|
||||
if continueCalls.Load() != 1 {
|
||||
t.Fatalf("expected exactly one continue call, got %d", continueCalls.Load())
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,7 +1,8 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -21,6 +22,9 @@ const (
|
||||
|
||||
var fileReadySleep = time.Sleep
|
||||
|
||||
// ErrUploadFileNotFound indicates that DeepSeek returned no matching uploaded file.
|
||||
var ErrUploadFileNotFound = errors.New("uploaded file not found")
|
||||
|
||||
func (c *Client) waitForUploadedFile(ctx context.Context, a *auth.RequestAuth, result *UploadFileResult) error {
|
||||
if result == nil || strings.TrimSpace(result.ID) == "" {
|
||||
return nil
|
||||
@@ -41,7 +45,7 @@ func (c *Client) waitForUploadedFile(ctx context.Context, a *auth.RequestAuth, r
|
||||
return fmt.Errorf("waiting for file %s to become ready: %w", result.ID, err)
|
||||
}
|
||||
|
||||
fetched, err := c.fetchUploadedFile(pollCtx, a, result.ID)
|
||||
fetched, err := c.FetchUploadedFile(pollCtx, a, result.ID)
|
||||
if err == nil && fetched != nil {
|
||||
mergeUploadFileResults(result, fetched)
|
||||
if isReadyUploadFileStatus(result.Status) {
|
||||
@@ -64,13 +68,14 @@ func (c *Client) waitForUploadedFile(ctx context.Context, a *auth.RequestAuth, r
|
||||
return fmt.Errorf("file %s did not become ready: %w", result.ID, lastErr)
|
||||
}
|
||||
|
||||
func (c *Client) fetchUploadedFile(ctx context.Context, a *auth.RequestAuth, fileID string) (*UploadFileResult, error) {
|
||||
// FetchUploadedFile returns metadata for an uploaded DeepSeek file by ID.
|
||||
func (c *Client) FetchUploadedFile(ctx context.Context, a *auth.RequestAuth, fileID string) (*UploadFileResult, error) {
|
||||
fileID = strings.TrimSpace(fileID)
|
||||
if fileID == "" {
|
||||
return nil, errors.New("file id is required")
|
||||
}
|
||||
clients := c.requestClientsForAuth(ctx, a)
|
||||
reqURL := DeepSeekFetchFilesURL + "?file_ids=" + url.QueryEscape(fileID)
|
||||
reqURL := dsprotocol.DeepSeekFetchFilesURL + "?file_ids=" + url.QueryEscape(fileID)
|
||||
headers := c.authHeaders(a.DeepSeekToken)
|
||||
|
||||
resp, status, err := c.getJSONWithStatus(ctx, clients.regular, reqURL, headers)
|
||||
@@ -91,7 +96,7 @@ func (c *Client) fetchUploadedFile(ctx context.Context, a *auth.RequestAuth, fil
|
||||
|
||||
result := extractFetchedUploadFileResult(resp, fileID)
|
||||
if result == nil || strings.TrimSpace(result.ID) == "" {
|
||||
return nil, errors.New("fetch files succeeded without matching file data")
|
||||
return nil, ErrUploadFileNotFound
|
||||
}
|
||||
result.Raw = resp
|
||||
return result, nil
|
||||
@@ -1,7 +1,6 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -41,17 +40,10 @@ func (c *Client) jsonHeaders(headers map[string]string) map[string]string {
|
||||
return out
|
||||
}
|
||||
|
||||
func ScanSSELines(resp *http.Response, onLine func([]byte) bool) error {
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
buf := make([]byte, 0, 64*1024)
|
||||
scanner.Buffer(buf, 2*1024*1024)
|
||||
for scanner.Scan() {
|
||||
if !onLine(scanner.Bytes()) {
|
||||
break
|
||||
}
|
||||
func cloneStringMap(in map[string]string) map[string]string {
|
||||
out := make(map[string]string, len(in))
|
||||
for k, v := range in {
|
||||
out[k] = v
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return out
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,7 +1,8 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -49,7 +50,7 @@ func (c *Client) GetSessionCount(ctx context.Context, a *auth.RequestAuth, maxAt
|
||||
headers := c.authHeaders(a.DeepSeekToken)
|
||||
|
||||
// 构建请求 URL
|
||||
reqURL := DeepSeekFetchSessionURL + "?lte_cursor.pinned=false"
|
||||
reqURL := dsprotocol.DeepSeekFetchSessionURL + "?lte_cursor.pinned=false"
|
||||
|
||||
resp, status, err := c.getJSONWithStatus(ctx, clients.regular, reqURL, headers)
|
||||
if err != nil {
|
||||
@@ -109,7 +110,7 @@ func (c *Client) GetSessionCount(ctx context.Context, a *auth.RequestAuth, maxAt
|
||||
func (c *Client) GetSessionCountForToken(ctx context.Context, token string) (*SessionStats, error) {
|
||||
clients := c.requestClientsFromContext(ctx)
|
||||
headers := c.authHeaders(token)
|
||||
reqURL := DeepSeekFetchSessionURL + "?lte_cursor.pinned=false"
|
||||
reqURL := dsprotocol.DeepSeekFetchSessionURL + "?lte_cursor.pinned=false"
|
||||
|
||||
resp, status, err := c.getJSONWithStatus(ctx, clients.regular, reqURL, headers)
|
||||
if err != nil {
|
||||
@@ -202,7 +203,7 @@ func (c *Client) FetchSessionPage(ctx context.Context, a *auth.RequestAuth, curs
|
||||
if cursor != "" {
|
||||
params.Set("lte_cursor", cursor)
|
||||
}
|
||||
reqURL := DeepSeekFetchSessionURL + "?" + params.Encode()
|
||||
reqURL := dsprotocol.DeepSeekFetchSessionURL + "?" + params.Encode()
|
||||
|
||||
resp, status, err := c.getJSONWithStatus(ctx, clients.regular, reqURL, headers)
|
||||
if err != nil {
|
||||
@@ -1,7 +1,8 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -43,7 +44,7 @@ func (c *Client) DeleteSession(ctx context.Context, a *auth.RequestAuth, session
|
||||
"chat_session_id": sessionID,
|
||||
}
|
||||
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, DeepSeekDeleteSessionURL, headers, payload)
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekDeleteSessionURL, headers, payload)
|
||||
if err != nil {
|
||||
config.Logger.Warn("[delete_session] request error", "error", err, "session_id", sessionID)
|
||||
attempts++
|
||||
@@ -97,7 +98,7 @@ func (c *Client) DeleteSessionForToken(ctx context.Context, token string, sessio
|
||||
"chat_session_id": sessionID,
|
||||
}
|
||||
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, DeepSeekDeleteSessionURL, headers, payload)
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekDeleteSessionURL, headers, payload)
|
||||
if err != nil {
|
||||
result.ErrorMessage = err.Error()
|
||||
return result, err
|
||||
@@ -120,7 +121,7 @@ func (c *Client) DeleteAllSessions(ctx context.Context, a *auth.RequestAuth) err
|
||||
headers := c.authHeaders(a.DeepSeekToken)
|
||||
payload := map[string]any{}
|
||||
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, DeepSeekDeleteAllSessionsURL, headers, payload)
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekDeleteAllSessionsURL, headers, payload)
|
||||
if err != nil {
|
||||
config.Logger.Warn("[delete_all_sessions] request error", "error", err)
|
||||
return err
|
||||
@@ -142,7 +143,7 @@ func (c *Client) DeleteAllSessionsForToken(ctx context.Context, token string) er
|
||||
headers := c.authHeaders(token)
|
||||
payload := map[string]any{}
|
||||
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, DeepSeekDeleteAllSessionsURL, headers, payload)
|
||||
resp, status, err := c.postJSONWithStatus(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekDeleteAllSessionsURL, headers, payload)
|
||||
if err != nil {
|
||||
config.Logger.Warn("[delete_all_sessions_for_token] request error", "error", err)
|
||||
return err
|
||||
@@ -1,8 +1,9 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -22,6 +23,7 @@ type UploadFileRequest struct {
|
||||
Filename string
|
||||
ContentType string
|
||||
Purpose string
|
||||
ModelType string
|
||||
Data []byte
|
||||
}
|
||||
|
||||
@@ -53,6 +55,7 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
purpose := strings.TrimSpace(req.Purpose)
|
||||
modelType := strings.ToLower(strings.TrimSpace(req.ModelType))
|
||||
body, contentTypeHeader, err := buildUploadMultipartBody(filename, contentType, req.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -63,14 +66,19 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload
|
||||
"purpose": purpose,
|
||||
"bytes": len(req.Data),
|
||||
}
|
||||
captureSession := c.capture.Start("deepseek_upload_file", DeepSeekUploadFileURL, a.AccountID, capturePayload)
|
||||
if modelType != "" {
|
||||
capturePayload["model_type"] = modelType
|
||||
}
|
||||
captureSession := c.capture.Start("deepseek_upload_file", dsprotocol.DeepSeekUploadFileURL, a.AccountID, capturePayload)
|
||||
attempts := 0
|
||||
refreshed := false
|
||||
powHeader := ""
|
||||
lastFailureKind := FailureUnknown
|
||||
lastFailureMessage := ""
|
||||
for attempts < maxAttempts {
|
||||
clients := c.requestClientsForAuth(ctx, a)
|
||||
if strings.TrimSpace(powHeader) == "" {
|
||||
powHeader, err = c.GetPowForTarget(ctx, a, DeepSeekUploadTargetPath, maxAttempts)
|
||||
powHeader, err = c.GetPowForTarget(ctx, a, dsprotocol.DeepSeekUploadTargetPath, maxAttempts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -78,13 +86,18 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload
|
||||
}
|
||||
headers := c.authHeaders(a.DeepSeekToken)
|
||||
headers["Content-Type"] = contentTypeHeader
|
||||
if modelType != "" {
|
||||
headers["x-model-type"] = modelType
|
||||
}
|
||||
headers["x-ds-pow-response"] = powHeader
|
||||
headers["x-file-size"] = strconv.Itoa(len(req.Data))
|
||||
headers["x-thinking-enabled"] = "1"
|
||||
resp, err := c.doUpload(ctx, clients.regular, clients.fallback, DeepSeekUploadFileURL, headers, body)
|
||||
resp, err := c.doUpload(ctx, clients.regular, clients.fallback, dsprotocol.DeepSeekUploadFileURL, headers, body)
|
||||
if err != nil {
|
||||
config.Logger.Warn("[upload_file] request error", "error", err, "account", a.AccountID, "filename", filename)
|
||||
powHeader = ""
|
||||
lastFailureKind = FailureUnknown
|
||||
lastFailureMessage = err.Error()
|
||||
attempts++
|
||||
continue
|
||||
}
|
||||
@@ -131,6 +144,12 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload
|
||||
}
|
||||
config.Logger.Warn("[upload_file] failed", "status", resp.StatusCode, "code", code, "biz_code", bizCode, "msg", msg, "biz_msg", bizMsg, "account", a.AccountID, "filename", filename)
|
||||
powHeader = ""
|
||||
lastFailureMessage = failureMessage(msg, bizMsg, "upload file failed")
|
||||
if isTokenInvalid(resp.StatusCode, code, bizCode, msg, bizMsg) || isAuthIndicativeBizFailure(msg, bizMsg) {
|
||||
lastFailureKind = authFailureKind(a.UseConfigToken)
|
||||
} else {
|
||||
lastFailureKind = FailureUnknown
|
||||
}
|
||||
if a.UseConfigToken {
|
||||
if !refreshed && shouldAttemptRefresh(resp.StatusCode, code, bizCode, msg, bizMsg) {
|
||||
if c.Auth.RefreshToken(ctx, a) {
|
||||
@@ -147,6 +166,9 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload
|
||||
}
|
||||
attempts++
|
||||
}
|
||||
if lastFailureKind != FailureUnknown {
|
||||
return nil, &RequestFailure{Op: "upload file", Kind: lastFailureKind, Message: lastFailureMessage}
|
||||
}
|
||||
return nil, errors.New("upload file failed")
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
@@ -75,12 +76,13 @@ func TestExtractUploadFileResultSupportsNestedShapes(t *testing.T) {
|
||||
|
||||
func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) {
|
||||
challengeHash := powpkg.DeepSeekHashV1([]byte(powpkg.BuildPrefix("salt", 1712345678) + "42"))
|
||||
powResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"challenge":{"algorithm":"DeepSeekHashV1","challenge":"` + hex.EncodeToString(challengeHash[:]) + `","salt":"salt","expire_at":1712345678,"difficulty":1000,"signature":"sig","target_path":"` + DeepSeekUploadTargetPath + `"}}}}`
|
||||
powResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"challenge":{"algorithm":"DeepSeekHashV1","challenge":"` + hex.EncodeToString(challengeHash[:]) + `","salt":"salt","expire_at":1712345678,"difficulty":1000,"signature":"sig","target_path":"` + dsprotocol.DeepSeekUploadTargetPath + `"}}}}`
|
||||
uploadResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"file":{"file_id":"file_789","filename":"demo.txt","bytes":5,"status":"processed","purpose":"assistants","is_image":false}}}}`
|
||||
var seenPow string
|
||||
var seenTargetPath string
|
||||
var seenContentType string
|
||||
var seenFileSize string
|
||||
var seenModelType string
|
||||
var seenBody string
|
||||
call := 0
|
||||
client := &Client{
|
||||
@@ -95,6 +97,7 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) {
|
||||
seenPow = req.Header.Get("x-ds-pow-response")
|
||||
seenContentType = req.Header.Get("Content-Type")
|
||||
seenFileSize = req.Header.Get("x-file-size")
|
||||
seenModelType = req.Header.Get("x-model-type")
|
||||
seenBody = string(bodyBytes)
|
||||
return &http.Response{StatusCode: http.StatusOK, Header: make(http.Header), Body: io.NopCloser(strings.NewReader(uploadResponse)), Request: req}, nil
|
||||
default:
|
||||
@@ -111,6 +114,7 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) {
|
||||
Filename: "demo.txt",
|
||||
ContentType: "text/plain",
|
||||
Purpose: "assistants",
|
||||
ModelType: "vision",
|
||||
Data: []byte("hello"),
|
||||
}, 1)
|
||||
if err != nil {
|
||||
@@ -119,7 +123,7 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) {
|
||||
if result.ID != "file_789" {
|
||||
t.Fatalf("expected uploaded file id file_789, got %#v", result)
|
||||
}
|
||||
if !strings.Contains(seenTargetPath, `"target_path":"`+DeepSeekUploadTargetPath+`"`) {
|
||||
if !strings.Contains(seenTargetPath, `"target_path":"`+dsprotocol.DeepSeekUploadTargetPath+`"`) {
|
||||
t.Fatalf("expected upload target_path in pow request, got %q", seenTargetPath)
|
||||
}
|
||||
if strings.TrimSpace(seenPow) == "" {
|
||||
@@ -133,12 +137,15 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) {
|
||||
if err := json.Unmarshal(rawPow, &powHeader); err != nil {
|
||||
t.Fatalf("unmarshal pow header failed: %v", err)
|
||||
}
|
||||
if powHeader["target_path"] != DeepSeekUploadTargetPath {
|
||||
t.Fatalf("expected pow target_path %q, got %#v", DeepSeekUploadTargetPath, powHeader["target_path"])
|
||||
if powHeader["target_path"] != dsprotocol.DeepSeekUploadTargetPath {
|
||||
t.Fatalf("expected pow target_path %q, got %#v", dsprotocol.DeepSeekUploadTargetPath, powHeader["target_path"])
|
||||
}
|
||||
if seenFileSize != "5" {
|
||||
t.Fatalf("expected x-file-size=5, got %q", seenFileSize)
|
||||
}
|
||||
if seenModelType != "vision" {
|
||||
t.Fatalf("expected x-model-type=vision, got %q", seenModelType)
|
||||
}
|
||||
if !strings.HasPrefix(seenContentType, "multipart/form-data; boundary=") {
|
||||
t.Fatalf("expected multipart content type, got %q", seenContentType)
|
||||
}
|
||||
@@ -153,7 +160,7 @@ func TestUploadFileWaitsForProcessedFetchFiles(t *testing.T) {
|
||||
defer func() { fileReadySleep = oldSleep }()
|
||||
|
||||
challengeHash := powpkg.DeepSeekHashV1([]byte(powpkg.BuildPrefix("salt", 1712345678) + "42"))
|
||||
powResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"challenge":{"algorithm":"DeepSeekHashV1","challenge":"` + hex.EncodeToString(challengeHash[:]) + `","salt":"salt","expire_at":1712345678,"difficulty":1000,"signature":"sig","target_path":"` + DeepSeekUploadTargetPath + `"}}}}`
|
||||
powResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"challenge":{"algorithm":"DeepSeekHashV1","challenge":"` + hex.EncodeToString(challengeHash[:]) + `","salt":"salt","expire_at":1712345678,"difficulty":1000,"signature":"sig","target_path":"` + dsprotocol.DeepSeekUploadTargetPath + `"}}}}`
|
||||
uploadResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"file":{"file_id":"file_789","filename":"demo.txt","bytes":5,"status":"PENDING","purpose":"assistants","is_image":false}}}}`
|
||||
pendingFetchResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"files":[{"file_id":"file_789","filename":"demo.txt","bytes":5,"status":"PENDING","purpose":"assistants","is_image":false}]}}}`
|
||||
processedFetchResponse := `{"code":0,"msg":"ok","data":{"biz_code":0,"biz_data":{"files":[{"file_id":"file_789","filename":"demo.txt","bytes":5,"status":"processed","purpose":"assistants","is_image":true}]}}}`
|
||||
@@ -165,7 +172,7 @@ func TestUploadFileWaitsForProcessedFetchFiles(t *testing.T) {
|
||||
switch call {
|
||||
case 1:
|
||||
bodyBytes, _ := io.ReadAll(req.Body)
|
||||
if !strings.Contains(string(bodyBytes), `"target_path":"`+DeepSeekUploadTargetPath+`"`) {
|
||||
if !strings.Contains(string(bodyBytes), `"target_path":"`+dsprotocol.DeepSeekUploadTargetPath+`"`) {
|
||||
t.Fatalf("expected pow target path request, got %s", string(bodyBytes))
|
||||
}
|
||||
return &http.Response{StatusCode: http.StatusOK, Header: make(http.Header), Body: io.NopCloser(strings.NewReader(powResponse)), Request: req}, nil
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
46
internal/deepseek/client/errors.go
Normal file
46
internal/deepseek/client/errors.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type FailureKind string
|
||||
|
||||
const (
|
||||
FailureUnknown FailureKind = ""
|
||||
FailureDirectUnauthorized FailureKind = "direct_unauthorized"
|
||||
FailureManagedUnauthorized FailureKind = "managed_unauthorized"
|
||||
)
|
||||
|
||||
type RequestFailure struct {
|
||||
Op string
|
||||
Kind FailureKind
|
||||
Message string
|
||||
}
|
||||
|
||||
func (e *RequestFailure) Error() string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
switch {
|
||||
case e.Op != "" && e.Message != "":
|
||||
return fmt.Sprintf("%s: %s", e.Op, e.Message)
|
||||
case e.Op != "":
|
||||
return e.Op + " failed"
|
||||
case e.Message != "":
|
||||
return e.Message
|
||||
default:
|
||||
return "request failed"
|
||||
}
|
||||
}
|
||||
|
||||
func IsManagedUnauthorizedError(err error) bool {
|
||||
var failure *RequestFailure
|
||||
return errors.As(err, &failure) && failure.Kind == FailureManagedUnauthorized
|
||||
}
|
||||
|
||||
func IsDirectUnauthorizedError(err error) bool {
|
||||
var failure *RequestFailure
|
||||
return errors.As(err, &failure) && failure.Kind == FailureDirectUnauthorized
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,4 +1,4 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,7 +1,8 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -172,7 +173,7 @@ func applyProxyConnectivityHeaders(req *http.Request) {
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
for key, value := range BaseHeaders {
|
||||
for key, value := range dsprotocol.BaseHeaders {
|
||||
key = strings.TrimSpace(key)
|
||||
value = strings.TrimSpace(value)
|
||||
if key == "" || value == "" {
|
||||
@@ -1,7 +1,8 @@
|
||||
package deepseek
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
dsprotocol "ds2api/internal/deepseek/protocol"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -52,7 +53,7 @@ func TestApplyProxyConnectivityHeadersUsesBaseHeaders(t *testing.T) {
|
||||
|
||||
applyProxyConnectivityHeaders(req)
|
||||
|
||||
for key, want := range BaseHeaders {
|
||||
for key, want := range dsprotocol.BaseHeaders {
|
||||
if got := req.Header.Get(key); got != want {
|
||||
t.Fatalf("expected header %q=%q, got %q", key, want, got)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user