From 83b4c7bcad4a13eed8a180ad9d189eebb9347b04 Mon Sep 17 00:00:00 2001 From: BigUncle Date: Fri, 1 May 2026 20:50:12 +0800 Subject: [PATCH 01/15] fix: add missing Vercel rewrite rules for admin API routes /admin/chat-history, /admin/proxies, /admin/dev/raw-samples, and /admin/dev/captures were falling through to the SPA fallback (/admin/index.html), causing "Unexpected token '<'" JSON parse errors on the frontend. --- vercel.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/vercel.json b/vercel.json index 600a53c..5cebd5d 100644 --- a/vercel.json +++ b/vercel.json @@ -81,6 +81,22 @@ "source": "/admin/version", "destination": "/api/index" }, + { + "source": "/admin/chat-history(.*)", + "destination": "/api/index" + }, + { + "source": "/admin/proxies(.*)", + "destination": "/api/index" + }, + { + "source": "/admin/dev/raw-samples/(.*)", + "destination": "/api/index" + }, + { + "source": "/admin/dev/captures(.*)", + "destination": "/api/index" + }, { "source": "/admin", "destination": "/admin/index.html" From 706e68de233340e1cec392ba4fc5dbeb55b814a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B?= Date: Fri, 1 May 2026 21:11:36 +0800 Subject: [PATCH 02/15] fix: increase stream timeout constants for large-context models; guard against context-cancelled double-recording - Increase StreamIdleTimeout from 90s to 300s and MaxKeepaliveCount from 10 to 40 to prevent premature stream termination with DeepSeek V4 Pro (~50K token contexts) - Add r.Context().Err() check after ConsumeSSE in empty_retry_runtime (chat + responses) to prevent historySession.error() from overwriting historySession.stopped() when the request context is cancelled References: - MaxKeepaliveCount=10 creates a 50s no-content timeout that kills the stream before DeepSeek V4 Pro can produce its first token with large contexts - Hermes Agent reports 'No response from provider for 180s' because the underlying SSE connection was already terminated by ds2api at 50s - Context cancellation path: OnContextDone -> stopped(), then finalize() with empty output -> retry -> error() overwrites stopped() --- internal/deepseek/protocol/constants.go | 4 ++-- internal/httpapi/openai/chat/empty_retry_runtime.go | 3 +++ internal/httpapi/openai/responses/empty_retry_runtime.go | 3 +++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/internal/deepseek/protocol/constants.go b/internal/deepseek/protocol/constants.go index 3cb6c4d..83daa31 100644 --- a/internal/deepseek/protocol/constants.go +++ b/internal/deepseek/protocol/constants.go @@ -159,6 +159,6 @@ func toStringSet(in []string) map[string]struct{} { const ( KeepAliveTimeout = 5 - StreamIdleTimeout = 90 - MaxKeepaliveCount = 10 + StreamIdleTimeout = 300 + MaxKeepaliveCount = 40 ) diff --git a/internal/httpapi/openai/chat/empty_retry_runtime.go b/internal/httpapi/openai/chat/empty_retry_runtime.go index de2ff12..147024f 100644 --- a/internal/httpapi/openai/chat/empty_retry_runtime.go +++ b/internal/httpapi/openai/chat/empty_retry_runtime.go @@ -252,6 +252,9 @@ func (h *Handler) consumeChatStreamAttempt(r *http.Request, resp *http.Response, } }, }) + if r.Context().Err() != nil { + return true, false + } terminalWritten := streamRuntime.finalize(finalReason, allowDeferEmpty && finalReason != "content_filter") if terminalWritten { recordChatStreamHistory(streamRuntime, historySession) diff --git a/internal/httpapi/openai/responses/empty_retry_runtime.go b/internal/httpapi/openai/responses/empty_retry_runtime.go index 627f074..45d861d 100644 --- 
a/internal/httpapi/openai/responses/empty_retry_runtime.go +++ b/internal/httpapi/openai/responses/empty_retry_runtime.go @@ -223,6 +223,9 @@ func (h *Handler) consumeResponsesStreamAttempt(r *http.Request, resp *http.Resp } }, }) + if r.Context().Err() != nil { + return true, false + } terminalWritten := streamRuntime.finalize(finalReason, allowDeferEmpty && finalReason != "content_filter") if terminalWritten { return true, false From df1cfac9bca8d58c2ff581a4654d58a9f2fb276b Mon Sep 17 00:00:00 2001 From: CJACK Date: Fri, 1 May 2026 21:15:17 +0800 Subject: [PATCH 03/15] refactor: replace history transcript format with numbered sections and rename upload file to HISTORY.txt --- docs/prompt-compatibility.md | 25 +++-- .../httpapi/openai/chat/chat_history_test.go | 4 +- .../openai/history/current_input_file.go | 2 +- internal/httpapi/openai/history_split_test.go | 81 +++++++++++----- internal/promptcompat/history_transcript.go | 93 +++++++++++++++++-- webui/src/locales/en.json | 2 +- webui/src/locales/zh.json | 2 +- 7 files changed, 164 insertions(+), 45 deletions(-) diff --git a/docs/prompt-compatibility.md b/docs/prompt-compatibility.md index d92cea3..f5e55c8 100644 --- a/docs/prompt-compatibility.md +++ b/docs/prompt-compatibility.md @@ -249,7 +249,7 @@ OpenAI 文件相关实现: 兼容层现在只保留 `current_input_file` 这一种拆分方式;旧的 `history_split` 已废弃,只保留为兼容旧配置的字段,不再参与请求处理。 -- `current_input_file` 默认开启;它用于把“完整上下文”合并进 `history.txt` 上下文文件。当最新 user turn 的纯文本长度达到 `current_input_file.min_chars`(默认 `0`)时,兼容层会上传一个文件名为 `history.txt` 的上下文文件,并在 live prompt 中只保留一个中性的 user 消息要求模型直接回答最新请求,不再暴露文件名或要求模型读取本地文件。 +- `current_input_file` 默认开启;它用于把“完整上下文”合并进 `HISTORY.txt` 上下文文件。当最新 user turn 的纯文本长度达到 `current_input_file.min_chars`(默认 `0`)时,兼容层会上传一个文件名为 `HISTORY.txt` 的上下文文件。文件内容会先做 OpenAI 消息标准化,再序列化成按轮次编号的 `HISTORY.txt` 风格 transcript,带有 `# HISTORY.txt` 标题和 `=== N. ROLE ===` 分段;live prompt 中则只保留一个中性的 user 消息要求模型直接回答最新请求,不再暴露文件名或要求模型读取本地文件。 - 如果 `current_input_file.enabled=false`,请求会直接透传,不上传任何拆分上下文文件。 - 旧的 `history_split.enabled` / `history_split.trigger_after_turns` 会被读取进配置对象以保持兼容,但不会触发拆分上传,也不会影响 `current_input_file` 的默认开启。 - 即使触发 `current_input_file` 后 live prompt 被缩短,对客户端回包里的上下文 token 统计,仍会沿用**拆分前的完整 prompt 语义**做计数,而不是按缩短后的占位 prompt 计算;否则会把真实上下文显著算小。 @@ -263,11 +263,24 @@ OpenAI 文件相关实现: - 旧历史拆分兼容壳: [internal/httpapi/openai/history/history_split.go](../internal/httpapi/openai/history/history_split.go) -当前输入转文件启用并触发时,上传文件的真实文件名是 `history.txt`,文件内容是完整 `messages` 上下文;它仍会先用 OpenAI 消息标准化和 DeepSeek 角色标记序列化,并直接作为 `history.txt` 的纯文本内容上传(不再注入文件边界标签): +当前输入转文件启用并触发时,上传文件的真实文件名是 `HISTORY.txt`,文件内容是完整 `messages` 上下文;它仍会先用 OpenAI 消息标准化和 DeepSeek 角色标记序列化,再按轮次编号成 `HISTORY.txt` 风格的 transcript(不再注入文件边界标签): ```text -[uploaded filename]: history.txt -<|begin▁of▁sentence|><|System|>...<|User|>...<|Assistant|>...<|Tool|>...<|User|>... +[uploaded filename]: HISTORY.txt +# HISTORY.txt +Prior conversation history and tool progress. + +=== 1. SYSTEM === +... + +=== 2. USER === +... + +=== 3. ASSISTANT === +... + +=== 4. TOOL === +... ``` 开启后,请求的 live prompt 不再直接内联完整上下文,而是保留一个 user role 的短提示,提示模型基于已提供上下文直接回答最新请求;上传后的 `file_id` 会进入 `ref_file_ids`。 @@ -334,7 +347,7 @@ OpenAI 文件相关实现: - 大部分结构化语义被压进 `prompt` - 文件保持文件 -- 需要时把完整上下文拆进 `history.txt` 上下文文件 +- 需要时把完整上下文拆进 `HISTORY.txt` 上下文文件,并按轮次编号成 transcript ## 12. 
修改时必须同步本文档的场景 @@ -347,7 +360,7 @@ OpenAI 文件相关实现: - tool result 注入方式变更 - tool prompt 模板或 tool_choice 约束变更 - inline 文件上传 / 文件引用收集规则变更 -- current input file 触发条件、上传格式、`history.txt` 包装格式变更 +- current input file 触发条件、上传格式、`HISTORY.txt` transcript 结构变更 - 旧 `history_split` 兼容逻辑的读取、忽略或退化行为变更 - completion payload 字段语义变更 - Claude / Gemini 对这套统一语义的复用关系变更 diff --git a/internal/httpapi/openai/chat/chat_history_test.go b/internal/httpapi/openai/chat/chat_history_test.go index 2201f0f..6b9f2a0 100644 --- a/internal/httpapi/openai/chat/chat_history_test.go +++ b/internal/httpapi/openai/chat/chat_history_test.go @@ -311,8 +311,8 @@ func TestChatCompletionsCurrentInputFilePersistsNeutralPrompt(t *testing.T) { if len(ds.uploadCalls) != 1 { t.Fatalf("expected current input upload to happen, got %d", len(ds.uploadCalls)) } - if ds.uploadCalls[0].Filename != "history.txt" { - t.Fatalf("expected history.txt upload, got %q", ds.uploadCalls[0].Filename) + if ds.uploadCalls[0].Filename != "HISTORY.txt" { + t.Fatalf("expected HISTORY.txt upload, got %q", ds.uploadCalls[0].Filename) } if full.HistoryText != string(ds.uploadCalls[0].Data) { t.Fatalf("expected uploaded current input file to be persisted in history text") diff --git a/internal/httpapi/openai/history/current_input_file.go b/internal/httpapi/openai/history/current_input_file.go index 181a5e2..7069207 100644 --- a/internal/httpapi/openai/history/current_input_file.go +++ b/internal/httpapi/openai/history/current_input_file.go @@ -62,7 +62,7 @@ func (s Service) ApplyCurrentInputFile(ctx context.Context, a *auth.RequestAuth, stdReq.RefFileIDs = prependUniqueRefFileID(stdReq.RefFileIDs, fileID) stdReq.FinalPrompt, stdReq.ToolNames = promptcompat.BuildOpenAIPrompt(messages, stdReq.ToolsRaw, "", stdReq.ToolChoice, stdReq.Thinking) // Token accounting must reflect the actual downstream context: - // the uploaded history.txt file content + the neutral live prompt. + // the uploaded HISTORY.txt file content + the neutral live prompt. 
stdReq.PromptTokenText = fileText + "\n" + stdReq.FinalPrompt return stdReq, nil } diff --git a/internal/httpapi/openai/history_split_test.go b/internal/httpapi/openai/history_split_test.go index d429b9b..799a1f7 100644 --- a/internal/httpapi/openai/history_split_test.go +++ b/internal/httpapi/openai/history_split_test.go @@ -61,26 +61,33 @@ func (streamStatusManagedAuthStub) DetermineCaller(_ *http.Request) (*auth.Reque func (streamStatusManagedAuthStub) Release(_ *auth.RequestAuth) {} -func TestBuildOpenAICurrentInputContextTranscriptUsesInjectedFileWrapper(t *testing.T) { +func TestBuildOpenAICurrentInputContextTranscriptUsesNumberedHistorySections(t *testing.T) { _, historyMessages := splitOpenAIHistoryMessages(historySplitTestMessages(), 1) transcript := buildOpenAICurrentInputContextTranscript(historyMessages) if strings.Contains(transcript, "[file content end]") || strings.Contains(transcript, "[file content begin]") || strings.Contains(transcript, "[file name]:") { - t.Fatalf("expected plain transcript without file wrapper tags, got %q", transcript) + t.Fatalf("expected transcript without file wrapper tags, got %q", transcript) } - if !strings.Contains(transcript, "<|begin▁of▁sentence|>") { - t.Fatalf("expected serialized conversation markers, got %q", transcript) + if !strings.Contains(transcript, "# HISTORY.txt") { + t.Fatalf("expected history transcript header, got %q", transcript) } - if !strings.Contains(transcript, "first user turn") || !strings.Contains(transcript, "tool result") { - t.Fatalf("expected historical turns preserved, got %q", transcript) + if !strings.Contains(transcript, "Prior conversation history and tool progress.") { + t.Fatalf("expected history transcript description, got %q", transcript) } - if !strings.Contains(transcript, "[reasoning_content]") || !strings.Contains(transcript, "hidden reasoning") { - t.Fatalf("expected reasoning block preserved, got %q", transcript) + for _, want := range []string{ + "=== 1. USER ===", + "=== 2. ASSISTANT ===", + "=== 3. 
TOOL ===", + "first user turn", + "tool result", + "[reasoning_content]", + "hidden reasoning", + "<|DSML|tool_calls>", + } { + if !strings.Contains(transcript, want) { + t.Fatalf("expected transcript to contain %q, got %q", want, transcript) + } } - if !strings.Contains(transcript, "<|DSML|tool_calls>") { - t.Fatalf("expected tool calls preserved, got %q", transcript) - } - } func TestSplitOpenAIHistoryMessagesUsesLatestUserTurn(t *testing.T) { @@ -243,7 +250,7 @@ func TestApplyCurrentInputFileDisabledPassThrough(t *testing.T) { } } -func TestApplyCurrentInputFileUploadsFirstTurnWithInjectedWrapper(t *testing.T) { +func TestApplyCurrentInputFileUploadsFirstTurnWithNumberedHistoryTranscript(t *testing.T) { ds := &inlineUploadDSStub{} h := &openAITestSurface{ Store: mockOpenAIConfig{ @@ -273,15 +280,21 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithInjectedWrapper(t *testing.T) t.Fatalf("expected 1 current input upload, got %d", len(ds.uploadCalls)) } upload := ds.uploadCalls[0] - if upload.Filename != "history.txt" { + if upload.Filename != "HISTORY.txt" { t.Fatalf("unexpected upload filename: %q", upload.Filename) } uploadedText := string(upload.Data) if strings.Contains(uploadedText, "[file content end]") || strings.Contains(uploadedText, "[file content begin]") || strings.Contains(uploadedText, "[file name]:") { t.Fatalf("expected uploaded transcript without file wrapper tags, got %q", uploadedText) } - if !strings.Contains(uploadedText, "<|begin▁of▁sentence|><|User|>first turn content that is long enough") { - t.Fatalf("expected serialized current user turn markers, got %q", uploadedText) + for _, want := range []string{ + "# HISTORY.txt", + "=== 1. USER ===", + "first turn content that is long enough", + } { + if !strings.Contains(uploadedText, want) { + t.Fatalf("expected uploaded transcript to contain %q, got %q", want, uploadedText) + } } if !strings.Contains(uploadedText, promptcompat.ThinkingInjectionMarker) { t.Fatalf("expected thinking injection in current input file, got %q", uploadedText) @@ -290,7 +303,7 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithInjectedWrapper(t *testing.T) if strings.Contains(out.FinalPrompt, "first turn content that is long enough") { t.Fatalf("expected current input text to be replaced in live prompt, got %s", out.FinalPrompt) } - if strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "history.txt") || strings.Contains(out.FinalPrompt, "Read that file") { + if strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "history.txt") || strings.Contains(out.FinalPrompt, "HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { t.Fatalf("expected live prompt not to instruct file reads, got %s", out.FinalPrompt) } if !strings.Contains(out.FinalPrompt, "Answer the latest user request directly.") { @@ -302,6 +315,9 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithInjectedWrapper(t *testing.T) if !strings.Contains(out.PromptTokenText, "first turn content that is long enough") { t.Fatalf("expected prompt token text to preserve original full context, got %q", out.PromptTokenText) } + if !strings.Contains(out.PromptTokenText, "# HISTORY.txt") || !strings.Contains(out.PromptTokenText, "=== 1. 
USER ===") { + t.Fatalf("expected prompt token text to include numbered history transcript, got %q", out.PromptTokenText) + } } func TestApplyCurrentInputFilePreservesFullContextPromptForTokenCounting(t *testing.T) { @@ -337,7 +353,10 @@ func TestApplyCurrentInputFilePreservesFullContextPromptForTokenCounting(t *test t.Fatalf("expected prompt token text to contain file context with full conversation, got %q", out.PromptTokenText) } if strings.Contains(out.PromptTokenText, "[file content end]") || strings.Contains(out.PromptTokenText, "[file name]:") { - t.Fatalf("expected prompt token text to use raw transcript without wrapper tags, got %q", out.PromptTokenText) + t.Fatalf("expected prompt token text to omit file wrapper tags, got %q", out.PromptTokenText) + } + if !strings.Contains(out.PromptTokenText, "# HISTORY.txt") || !strings.Contains(out.PromptTokenText, "=== 1. SYSTEM ===") { + t.Fatalf("expected prompt token text to include numbered history transcript, got %q", out.PromptTokenText) } if !strings.Contains(out.PromptTokenText, "Answer the latest user request directly.") { t.Fatalf("expected prompt token text to also include neutral live prompt, got %q", out.PromptTokenText) @@ -378,16 +397,16 @@ func TestApplyCurrentInputFileUploadsFullContextFile(t *testing.T) { t.Fatalf("expected one current input upload, got %d", len(ds.uploadCalls)) } upload := ds.uploadCalls[0] - if upload.Filename != "history.txt" { - t.Fatalf("expected history.txt upload, got %q", upload.Filename) + if upload.Filename != "HISTORY.txt" { + t.Fatalf("expected HISTORY.txt upload, got %q", upload.Filename) } uploadedText := string(upload.Data) - for _, want := range []string{"system instructions", "first user turn", "hidden reasoning", "tool result", "latest user turn", promptcompat.ThinkingInjectionMarker} { + for _, want := range []string{"# HISTORY.txt", "=== 1. SYSTEM ===", "=== 2. USER ===", "=== 3. ASSISTANT ===", "=== 4. TOOL ===", "=== 5. USER ===", "system instructions", "first user turn", "hidden reasoning", "tool result", "latest user turn", promptcompat.ThinkingInjectionMarker} { if !strings.Contains(uploadedText, want) { t.Fatalf("expected full context file to contain %q, got %q", want, uploadedText) } } - if strings.Contains(out.FinalPrompt, "first user turn") || strings.Contains(out.FinalPrompt, "latest user turn") || strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "history.txt") || strings.Contains(out.FinalPrompt, "Read that file") { + if strings.Contains(out.FinalPrompt, "first user turn") || strings.Contains(out.FinalPrompt, "latest user turn") || strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "history.txt") || strings.Contains(out.FinalPrompt, "HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { t.Fatalf("expected live prompt to use only a neutral continuation instruction, got %s", out.FinalPrompt) } if !strings.Contains(out.FinalPrompt, "Answer the latest user request directly.") { @@ -423,6 +442,9 @@ func TestApplyCurrentInputFileCarriesHistoryText(t *testing.T) { if out.HistoryText != string(ds.uploadCalls[0].Data) { t.Fatalf("expected current input file flow to preserve uploaded text in history, got %q", out.HistoryText) } + if !strings.Contains(out.HistoryText, "# HISTORY.txt") || !strings.Contains(out.HistoryText, "=== 1. 
SYSTEM ===") { + t.Fatalf("expected history text to use numbered transcript format, got %q", out.HistoryText) + } } func TestChatCompletionsCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *testing.T) { @@ -454,7 +476,7 @@ func TestChatCompletionsCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *t t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls)) } upload := ds.uploadCalls[0] - if upload.Filename != "history.txt" { + if upload.Filename != "HISTORY.txt" { t.Fatalf("unexpected upload filename: %q", upload.Filename) } if upload.Purpose != "assistants" { @@ -462,7 +484,10 @@ func TestChatCompletionsCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *t } historyText := string(upload.Data) if strings.Contains(historyText, "[file content end]") || strings.Contains(historyText, "[file content begin]") || strings.Contains(historyText, "[file name]:") { - t.Fatalf("expected plain history transcript without wrapper tags, got %s", historyText) + t.Fatalf("expected history transcript without file wrapper tags, got %s", historyText) + } + if !strings.Contains(historyText, "# HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { + t.Fatalf("expected history transcript to use numbered sections, got %s", historyText) } if !strings.Contains(historyText, "latest user turn") { t.Fatalf("expected full context to include latest turn, got %s", historyText) @@ -523,6 +548,10 @@ func TestResponsesCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *testing if len(ds.uploadCalls) != 1 { t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls)) } + historyText := string(ds.uploadCalls[0].Data) + if !strings.Contains(historyText, "# HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { + t.Fatalf("expected uploaded history text to use numbered transcript format, got %s", historyText) + } if ds.completionReq == nil { t.Fatal("expected completion payload to be captured") } @@ -669,6 +698,10 @@ func TestCurrentInputFileWorksAcrossAutoDeleteModes(t *testing.T) { if len(ds.uploadCalls) != 1 { t.Fatalf("expected current input upload for mode=%s, got %d", mode, len(ds.uploadCalls)) } + historyText := string(ds.uploadCalls[0].Data) + if !strings.Contains(historyText, "# HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { + t.Fatalf("expected uploaded history text to use numbered transcript format, got %s", historyText) + } if ds.completionReq == nil { t.Fatalf("expected completion payload for mode=%s", mode) } diff --git a/internal/promptcompat/history_transcript.go b/internal/promptcompat/history_transcript.go index a3f7905..84a62cb 100644 --- a/internal/promptcompat/history_transcript.go +++ b/internal/promptcompat/history_transcript.go @@ -1,35 +1,108 @@ package promptcompat import ( + "fmt" "strings" - - "ds2api/internal/prompt" ) -const CurrentInputContextFilename = "history.txt" +const CurrentInputContextFilename = "HISTORY.txt" + +const historyTranscriptTitle = "# HISTORY.txt" +const historyTranscriptSummary = "Prior conversation history and tool progress." 
func BuildOpenAIHistoryTranscript(messages []any) string { - return buildOpenAIInjectedFileTranscript(messages) + return buildOpenAIHistoryTranscript(messages) } func BuildOpenAICurrentUserInputTranscript(text string) string { if strings.TrimSpace(text) == "" { return "" } - return BuildOpenAICurrentInputContextTranscript([]any{ + return buildOpenAIHistoryTranscript([]any{ map[string]any{"role": "user", "content": text}, }) } func BuildOpenAICurrentInputContextTranscript(messages []any) string { - return buildOpenAIInjectedFileTranscript(messages) + return buildOpenAIHistoryTranscript(messages) } -func buildOpenAIInjectedFileTranscript(messages []any) string { - normalized := NormalizeOpenAIMessagesForPrompt(messages, "") - transcript := strings.TrimSpace(prompt.MessagesPrepare(normalized)) +func buildOpenAIHistoryTranscript(messages []any) string { + if len(messages) == 0 { + return "" + } + var b strings.Builder + b.WriteString(historyTranscriptTitle) + b.WriteString("\n") + b.WriteString(historyTranscriptSummary) + b.WriteString("\n\n") + + entry := 0 + for _, raw := range messages { + msg, ok := raw.(map[string]any) + if !ok { + continue + } + role := normalizeOpenAIRoleForPrompt(strings.ToLower(strings.TrimSpace(asString(msg["role"])))) + content := strings.TrimSpace(buildOpenAIHistoryEntry(role, msg)) + if content == "" { + continue + } + entry++ + fmt.Fprintf(&b, "=== %d. %s ===\n%s\n\n", entry, strings.ToUpper(roleLabelForHistory(role)), content) + } + + transcript := strings.TrimSpace(b.String()) if transcript == "" { return "" } - return transcript + return transcript + "\n" +} + +func buildOpenAIHistoryEntry(role string, msg map[string]any) string { + switch role { + case "assistant": + return strings.TrimSpace(buildAssistantContentForPrompt(msg)) + case "tool", "function": + return strings.TrimSpace(buildToolHistoryContent(msg)) + case "system", "user": + return strings.TrimSpace(NormalizeOpenAIContentForPrompt(msg["content"])) + default: + return strings.TrimSpace(NormalizeOpenAIContentForPrompt(msg["content"])) + } +} + +func buildToolHistoryContent(msg map[string]any) string { + content := strings.TrimSpace(NormalizeOpenAIContentForPrompt(msg["content"])) + parts := make([]string, 0, 2) + if name := strings.TrimSpace(asString(msg["name"])); name != "" { + parts = append(parts, "name="+name) + } + if callID := strings.TrimSpace(asString(msg["tool_call_id"])); callID != "" { + parts = append(parts, "tool_call_id="+callID) + } + header := "" + if len(parts) > 0 { + header = "[" + strings.Join(parts, " ") + "]" + } + switch { + case header != "" && content != "": + return header + "\n" + content + case header != "": + return header + default: + return content + } +} + +func roleLabelForHistory(role string) string { + role = strings.ToLower(strings.TrimSpace(role)) + switch role { + case "function": + return "tool" + case "": + return "unknown" + default: + return role + } } diff --git a/webui/src/locales/en.json b/webui/src/locales/en.json index 0b3de63..072e514 100644 --- a/webui/src/locales/en.json +++ b/webui/src/locales/en.json @@ -394,7 +394,7 @@ "thinkingInjectionPromptHelp": "Leave empty to use the built-in default prompt shown as the input placeholder.", "currentInputFileTitle": "Independent Split", "currentInputFileEnabled": "Independent split (by size)", - "currentInputFileDesc": "Enabled by default. Once the character threshold is reached, upload the full context as a history.txt context file.", + "currentInputFileDesc": "Enabled by default. 
Once the character threshold is reached, upload the full context as a HISTORY.txt context file.", "currentInputFileMinChars": "Current input threshold (characters)", "currentInputFileHelp": "Default is 0, which uses independent split for any non-empty input.", "compatibilityTitle": "Compatibility", diff --git a/webui/src/locales/zh.json b/webui/src/locales/zh.json index 9aa127b..409f48d 100644 --- a/webui/src/locales/zh.json +++ b/webui/src/locales/zh.json @@ -394,7 +394,7 @@ "thinkingInjectionPromptHelp": "留空时使用内置默认提示词;默认内容会显示在输入框占位文本中。", "currentInputFileTitle": "独立拆分", "currentInputFileEnabled": "独立拆分(按量)", - "currentInputFileDesc": "默认开启。达到字符阈值后,将完整上下文上传为 history.txt 上下文文件。", + "currentInputFileDesc": "默认开启。达到字符阈值后,将完整上下文上传为 HISTORY.txt 上下文文件。", "currentInputFileMinChars": "当前输入阈值(字符数)", "currentInputFileHelp": "默认 0,表示只要有输入就会使用独立拆分。", "compatibilityTitle": "兼容性设置", From 3430322e8179bc5a3c290b535546fa1ecb269813 Mon Sep 17 00:00:00 2001 From: BigUncle Date: Fri, 1 May 2026 21:17:52 +0800 Subject: [PATCH 04/15] docs: add Vercel chat history read-only filesystem troubleshooting --- docs/DEPLOY.en.md | 17 +++++++++++++++++ docs/DEPLOY.md | 17 +++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/docs/DEPLOY.en.md b/docs/DEPLOY.en.md index a7716a3..b2ed75e 100644 --- a/docs/DEPLOY.en.md +++ b/docs/DEPLOY.en.md @@ -271,6 +271,7 @@ VERCEL_TEAM_ID=team_xxxxxxxxxxxx # optional for personal accounts | `VERCEL_TOKEN` | Vercel sync token | — | | `VERCEL_PROJECT_ID` | Vercel project ID | — | | `VERCEL_TEAM_ID` | Vercel team ID | — | +| `DS2API_CHAT_HISTORY_PATH` | Chat history storage path (must be set to `/tmp/chat_history.json` on Vercel, otherwise unavailable due to read-only filesystem) | `data/chat_history.json` | | `DS2API_VERCEL_PROTECTION_BYPASS` | Deployment protection bypass for internal Node→Go calls | — | ### 3.4 Vercel Architecture @@ -360,6 +361,22 @@ If API responses return Vercel HTML `Authentication Required`: - **Option B**: Add `x-vercel-protection-bypass` header to requests - **Option C**: Set `VERCEL_AUTOMATION_BYPASS_SECRET` (or `DS2API_VERCEL_PROTECTION_BYPASS`) for internal Node→Go calls +#### Chat History Unavailable (read-only file system) + +```text +create chat history dir: mkdir /var/task/data: read-only file system +``` + +**Cause**: Vercel Serverless functions have a read-only filesystem (`/var/task`). Chat history fails because it cannot create directories there. + +**Fix**: Add the following in Vercel Project Settings → Environment Variables: + +```text +DS2API_CHAT_HISTORY_PATH=/tmp/chat_history.json +``` + +`/tmp` is the only writable directory in Vercel Serverless. Data is ephemeral (not persisted across cold starts), but the feature works within a single instance lifetime. + ### 3.6 Build Artifacts Not Committed - `static/admin` directory is not in Git diff --git a/docs/DEPLOY.md b/docs/DEPLOY.md index 3ff20ed..4a1b75f 100644 --- a/docs/DEPLOY.md +++ b/docs/DEPLOY.md @@ -271,6 +271,7 @@ VERCEL_TEAM_ID=team_xxxxxxxxxxxx # 个人账号可留空 | `VERCEL_TOKEN` | Vercel 同步 token | — | | `VERCEL_PROJECT_ID` | Vercel 项目 ID | — | | `VERCEL_TEAM_ID` | Vercel 团队 ID | — | +| `DS2API_CHAT_HISTORY_PATH` | Chat history 存储路径(Vercel 上必须设为 `/tmp/chat_history.json`,否则因文件系统只读而不可用) | `data/chat_history.json` | | `DS2API_VERCEL_PROTECTION_BYPASS` | 部署保护绕过密钥(内部 Node→Go 调用) | — | ### 3.3 运行时行为配置(通过 Admin API 设置) @@ -370,6 +371,22 @@ No Output Directory named "public" found after the Build completed. 
- **方案 B**:请求中添加 `x-vercel-protection-bypass` 头 - **方案 C**:设置 `VERCEL_AUTOMATION_BYPASS_SECRET`(或 `DS2API_VERCEL_PROTECTION_BYPASS`),仅影响内部 Node→Go 调用 +#### Chat History 不可用(read-only file system) + +```text +create chat history dir: mkdir /var/task/data: read-only file system +``` + +**原因**:Vercel Serverless 函数的文件系统(`/var/task`)为只读,chat history 尝试在该路径下创建目录失败。 + +**解决**:在 Vercel Project Settings → Environment Variables 中添加: + +```text +DS2API_CHAT_HISTORY_PATH=/tmp/chat_history.json +``` + +`/tmp` 是 Vercel Serverless 环境中唯一可写的目录。数据在函数冷启动之间不会持久化(ephemeral),但在单个实例生命周期内功能正常。 + ### 3.6 仓库不提交构建产物 - `static/admin` 目录不在 Git 中 From 43402e7a260827a8da029860e9f0416178a90832 Mon Sep 17 00:00:00 2001 From: CJACK Date: Fri, 1 May 2026 22:05:45 +0800 Subject: [PATCH 05/15] refactor: rename history file constant from HISTORY.txt to DS2API_HISTORY.txt across codebase and tests --- README.MD | 2 +- README.en.md | 2 +- docs/prompt-compatibility.md | 12 ++++---- .../httpapi/openai/chat/chat_history_test.go | 4 +-- .../openai/history/current_input_file.go | 2 +- internal/httpapi/openai/history_split_test.go | 30 +++++++++---------- internal/promptcompat/history_transcript.go | 4 +-- .../upstream.stream.sse | 2 +- .../upstream.stream.sse | 2 +- webui/src/locales/en.json | 4 +-- webui/src/locales/zh.json | 4 +-- 11 files changed, 34 insertions(+), 34 deletions(-) diff --git a/README.MD b/README.MD index be7698f..ebb8a70 100644 --- a/README.MD +++ b/README.MD @@ -318,7 +318,7 @@ go run ./cmd/ds2api - `runtime`:账号并发、队列与 token 刷新策略,可通过 Admin Settings 热更新。 - `auto_delete.mode`:请求结束后的远端会话清理策略,支持 `none` / `single` / `all`。 - `history_split`:旧轮次拆分字段,已废弃并忽略,仅保留兼容旧配置。 -- `current_input_file`:唯一生效的独立拆分策略;默认开启且阈值为 `0`,触发时将完整上下文合并上传为 `history.txt` 上下文文件。 +- `current_input_file`:唯一生效的独立拆分策略;默认开启且阈值为 `0`,触发时将完整上下文合并上传为 `DS2API_HISTORY.txt` 上下文文件。 - 如果关闭 `current_input_file`,请求会直接透传,不上传拆分上下文文件。 - `thinking_injection`:默认开启;在最新 user 消息末尾追加思考增强提示词,提高高强度推理与工具调用前的思考稳定性;`prompt` 留空时使用内置默认提示词。 diff --git a/README.en.md b/README.en.md index c2ebf0e..267f7b1 100644 --- a/README.en.md +++ b/README.en.md @@ -306,7 +306,7 @@ Common fields: - `runtime`: account concurrency, queueing, and token refresh behavior, hot-reloadable via Admin Settings. - `auto_delete.mode`: remote session cleanup after each request, supporting `none` / `single` / `all`. - `history_split`: legacy multi-turn history split field, now ignored and kept only for backward-compatible config loading. -- `current_input_file`: the only active split mode; it is enabled by default and uploads the full context as a `history.txt` context file once the character threshold is reached. +- `current_input_file`: the only active split mode; it is enabled by default and uploads the full context as a `DS2API_HISTORY.txt` context file once the character threshold is reached. - If you turn off `current_input_file`, requests pass through directly without uploading any split context file. For the full environment variable list, see [docs/DEPLOY.en.md](docs/DEPLOY.en.md). For auth behavior, see [API.en.md](API.en.md#authentication). 
diff --git a/docs/prompt-compatibility.md b/docs/prompt-compatibility.md index f5e55c8..f3538ff 100644 --- a/docs/prompt-compatibility.md +++ b/docs/prompt-compatibility.md @@ -249,7 +249,7 @@ OpenAI 文件相关实现: 兼容层现在只保留 `current_input_file` 这一种拆分方式;旧的 `history_split` 已废弃,只保留为兼容旧配置的字段,不再参与请求处理。 -- `current_input_file` 默认开启;它用于把“完整上下文”合并进 `HISTORY.txt` 上下文文件。当最新 user turn 的纯文本长度达到 `current_input_file.min_chars`(默认 `0`)时,兼容层会上传一个文件名为 `HISTORY.txt` 的上下文文件。文件内容会先做 OpenAI 消息标准化,再序列化成按轮次编号的 `HISTORY.txt` 风格 transcript,带有 `# HISTORY.txt` 标题和 `=== N. ROLE ===` 分段;live prompt 中则只保留一个中性的 user 消息要求模型直接回答最新请求,不再暴露文件名或要求模型读取本地文件。 +- `current_input_file` 默认开启;它用于把“完整上下文”合并进 `DS2API_HISTORY.txt` 上下文文件。当最新 user turn 的纯文本长度达到 `current_input_file.min_chars`(默认 `0`)时,兼容层会上传一个文件名为 `DS2API_HISTORY.txt` 的上下文文件。文件内容会先做 OpenAI 消息标准化,再序列化成按轮次编号的 `DS2API_HISTORY.txt` 风格 transcript,带有 `# DS2API_HISTORY.txt` 标题和 `=== N. ROLE ===` 分段;live prompt 中则只保留一个中性的 user 消息要求模型直接回答最新请求,不再暴露文件名或要求模型读取本地文件。 - 如果 `current_input_file.enabled=false`,请求会直接透传,不上传任何拆分上下文文件。 - 旧的 `history_split.enabled` / `history_split.trigger_after_turns` 会被读取进配置对象以保持兼容,但不会触发拆分上传,也不会影响 `current_input_file` 的默认开启。 - 即使触发 `current_input_file` 后 live prompt 被缩短,对客户端回包里的上下文 token 统计,仍会沿用**拆分前的完整 prompt 语义**做计数,而不是按缩短后的占位 prompt 计算;否则会把真实上下文显著算小。 @@ -263,11 +263,11 @@ OpenAI 文件相关实现: - 旧历史拆分兼容壳: [internal/httpapi/openai/history/history_split.go](../internal/httpapi/openai/history/history_split.go) -当前输入转文件启用并触发时,上传文件的真实文件名是 `HISTORY.txt`,文件内容是完整 `messages` 上下文;它仍会先用 OpenAI 消息标准化和 DeepSeek 角色标记序列化,再按轮次编号成 `HISTORY.txt` 风格的 transcript(不再注入文件边界标签): +当前输入转文件启用并触发时,上传文件的真实文件名是 `DS2API_HISTORY.txt`,文件内容是完整 `messages` 上下文;它仍会先用 OpenAI 消息标准化和 DeepSeek 角色标记序列化,再按轮次编号成 `DS2API_HISTORY.txt` 风格的 transcript(不再注入文件边界标签): ```text -[uploaded filename]: HISTORY.txt -# HISTORY.txt +[uploaded filename]: DS2API_HISTORY.txt +# DS2API_HISTORY.txt Prior conversation history and tool progress. === 1. SYSTEM === @@ -347,7 +347,7 @@ Prior conversation history and tool progress. - 大部分结构化语义被压进 `prompt` - 文件保持文件 -- 需要时把完整上下文拆进 `HISTORY.txt` 上下文文件,并按轮次编号成 transcript +- 需要时把完整上下文拆进 `DS2API_HISTORY.txt` 上下文文件,并按轮次编号成 transcript ## 12. 修改时必须同步本文档的场景 @@ -360,7 +360,7 @@ Prior conversation history and tool progress. 
- tool result 注入方式变更 - tool prompt 模板或 tool_choice 约束变更 - inline 文件上传 / 文件引用收集规则变更 -- current input file 触发条件、上传格式、`HISTORY.txt` transcript 结构变更 +- current input file 触发条件、上传格式、`DS2API_HISTORY.txt` transcript 结构变更 - 旧 `history_split` 兼容逻辑的读取、忽略或退化行为变更 - completion payload 字段语义变更 - Claude / Gemini 对这套统一语义的复用关系变更 diff --git a/internal/httpapi/openai/chat/chat_history_test.go b/internal/httpapi/openai/chat/chat_history_test.go index 6b9f2a0..89bc02d 100644 --- a/internal/httpapi/openai/chat/chat_history_test.go +++ b/internal/httpapi/openai/chat/chat_history_test.go @@ -311,8 +311,8 @@ func TestChatCompletionsCurrentInputFilePersistsNeutralPrompt(t *testing.T) { if len(ds.uploadCalls) != 1 { t.Fatalf("expected current input upload to happen, got %d", len(ds.uploadCalls)) } - if ds.uploadCalls[0].Filename != "HISTORY.txt" { - t.Fatalf("expected HISTORY.txt upload, got %q", ds.uploadCalls[0].Filename) + if ds.uploadCalls[0].Filename != "DS2API_HISTORY.txt" { + t.Fatalf("expected DS2API_HISTORY.txt upload, got %q", ds.uploadCalls[0].Filename) } if full.HistoryText != string(ds.uploadCalls[0].Data) { t.Fatalf("expected uploaded current input file to be persisted in history text") diff --git a/internal/httpapi/openai/history/current_input_file.go b/internal/httpapi/openai/history/current_input_file.go index 7069207..10d5297 100644 --- a/internal/httpapi/openai/history/current_input_file.go +++ b/internal/httpapi/openai/history/current_input_file.go @@ -62,7 +62,7 @@ func (s Service) ApplyCurrentInputFile(ctx context.Context, a *auth.RequestAuth, stdReq.RefFileIDs = prependUniqueRefFileID(stdReq.RefFileIDs, fileID) stdReq.FinalPrompt, stdReq.ToolNames = promptcompat.BuildOpenAIPrompt(messages, stdReq.ToolsRaw, "", stdReq.ToolChoice, stdReq.Thinking) // Token accounting must reflect the actual downstream context: - // the uploaded HISTORY.txt file content + the neutral live prompt. + // the uploaded DS2API_HISTORY.txt file content + the neutral live prompt. 
stdReq.PromptTokenText = fileText + "\n" + stdReq.FinalPrompt return stdReq, nil } diff --git a/internal/httpapi/openai/history_split_test.go b/internal/httpapi/openai/history_split_test.go index 799a1f7..27a3bf7 100644 --- a/internal/httpapi/openai/history_split_test.go +++ b/internal/httpapi/openai/history_split_test.go @@ -68,7 +68,7 @@ func TestBuildOpenAICurrentInputContextTranscriptUsesNumberedHistorySections(t * if strings.Contains(transcript, "[file content end]") || strings.Contains(transcript, "[file content begin]") || strings.Contains(transcript, "[file name]:") { t.Fatalf("expected transcript without file wrapper tags, got %q", transcript) } - if !strings.Contains(transcript, "# HISTORY.txt") { + if !strings.Contains(transcript, "# DS2API_HISTORY.txt") { t.Fatalf("expected history transcript header, got %q", transcript) } if !strings.Contains(transcript, "Prior conversation history and tool progress.") { @@ -280,7 +280,7 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithNumberedHistoryTranscript(t *t t.Fatalf("expected 1 current input upload, got %d", len(ds.uploadCalls)) } upload := ds.uploadCalls[0] - if upload.Filename != "HISTORY.txt" { + if upload.Filename != "DS2API_HISTORY.txt" { t.Fatalf("unexpected upload filename: %q", upload.Filename) } uploadedText := string(upload.Data) @@ -288,7 +288,7 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithNumberedHistoryTranscript(t *t t.Fatalf("expected uploaded transcript without file wrapper tags, got %q", uploadedText) } for _, want := range []string{ - "# HISTORY.txt", + "# DS2API_HISTORY.txt", "=== 1. USER ===", "first turn content that is long enough", } { @@ -303,7 +303,7 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithNumberedHistoryTranscript(t *t if strings.Contains(out.FinalPrompt, "first turn content that is long enough") { t.Fatalf("expected current input text to be replaced in live prompt, got %s", out.FinalPrompt) } - if strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "history.txt") || strings.Contains(out.FinalPrompt, "HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { + if strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "DS2API_HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { t.Fatalf("expected live prompt not to instruct file reads, got %s", out.FinalPrompt) } if !strings.Contains(out.FinalPrompt, "Answer the latest user request directly.") { @@ -315,7 +315,7 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithNumberedHistoryTranscript(t *t if !strings.Contains(out.PromptTokenText, "first turn content that is long enough") { t.Fatalf("expected prompt token text to preserve original full context, got %q", out.PromptTokenText) } - if !strings.Contains(out.PromptTokenText, "# HISTORY.txt") || !strings.Contains(out.PromptTokenText, "=== 1. USER ===") { + if !strings.Contains(out.PromptTokenText, "# DS2API_HISTORY.txt") || !strings.Contains(out.PromptTokenText, "=== 1. 
USER ===") { t.Fatalf("expected prompt token text to include numbered history transcript, got %q", out.PromptTokenText) } } @@ -355,7 +355,7 @@ func TestApplyCurrentInputFilePreservesFullContextPromptForTokenCounting(t *test if strings.Contains(out.PromptTokenText, "[file content end]") || strings.Contains(out.PromptTokenText, "[file name]:") { t.Fatalf("expected prompt token text to omit file wrapper tags, got %q", out.PromptTokenText) } - if !strings.Contains(out.PromptTokenText, "# HISTORY.txt") || !strings.Contains(out.PromptTokenText, "=== 1. SYSTEM ===") { + if !strings.Contains(out.PromptTokenText, "# DS2API_HISTORY.txt") || !strings.Contains(out.PromptTokenText, "=== 1. SYSTEM ===") { t.Fatalf("expected prompt token text to include numbered history transcript, got %q", out.PromptTokenText) } if !strings.Contains(out.PromptTokenText, "Answer the latest user request directly.") { @@ -397,16 +397,16 @@ func TestApplyCurrentInputFileUploadsFullContextFile(t *testing.T) { t.Fatalf("expected one current input upload, got %d", len(ds.uploadCalls)) } upload := ds.uploadCalls[0] - if upload.Filename != "HISTORY.txt" { - t.Fatalf("expected HISTORY.txt upload, got %q", upload.Filename) + if upload.Filename != "DS2API_HISTORY.txt" { + t.Fatalf("expected DS2API_HISTORY.txt upload, got %q", upload.Filename) } uploadedText := string(upload.Data) - for _, want := range []string{"# HISTORY.txt", "=== 1. SYSTEM ===", "=== 2. USER ===", "=== 3. ASSISTANT ===", "=== 4. TOOL ===", "=== 5. USER ===", "system instructions", "first user turn", "hidden reasoning", "tool result", "latest user turn", promptcompat.ThinkingInjectionMarker} { + for _, want := range []string{"# DS2API_HISTORY.txt", "=== 1. SYSTEM ===", "=== 2. USER ===", "=== 3. ASSISTANT ===", "=== 4. TOOL ===", "=== 5. USER ===", "system instructions", "first user turn", "hidden reasoning", "tool result", "latest user turn", promptcompat.ThinkingInjectionMarker} { if !strings.Contains(uploadedText, want) { t.Fatalf("expected full context file to contain %q, got %q", want, uploadedText) } } - if strings.Contains(out.FinalPrompt, "first user turn") || strings.Contains(out.FinalPrompt, "latest user turn") || strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "history.txt") || strings.Contains(out.FinalPrompt, "HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { + if strings.Contains(out.FinalPrompt, "first user turn") || strings.Contains(out.FinalPrompt, "latest user turn") || strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "DS2API_HISTORY.txt") || strings.Contains(out.FinalPrompt, "DS2API_HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { t.Fatalf("expected live prompt to use only a neutral continuation instruction, got %s", out.FinalPrompt) } if !strings.Contains(out.FinalPrompt, "Answer the latest user request directly.") { @@ -442,7 +442,7 @@ func TestApplyCurrentInputFileCarriesHistoryText(t *testing.T) { if out.HistoryText != string(ds.uploadCalls[0].Data) { t.Fatalf("expected current input file flow to preserve uploaded text in history, got %q", out.HistoryText) } - if !strings.Contains(out.HistoryText, "# HISTORY.txt") || !strings.Contains(out.HistoryText, "=== 1. SYSTEM ===") { + if !strings.Contains(out.HistoryText, "# DS2API_HISTORY.txt") || !strings.Contains(out.HistoryText, "=== 1. 
SYSTEM ===") { t.Fatalf("expected history text to use numbered transcript format, got %q", out.HistoryText) } } @@ -476,7 +476,7 @@ func TestChatCompletionsCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *t t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls)) } upload := ds.uploadCalls[0] - if upload.Filename != "HISTORY.txt" { + if upload.Filename != "DS2API_HISTORY.txt" { t.Fatalf("unexpected upload filename: %q", upload.Filename) } if upload.Purpose != "assistants" { @@ -486,7 +486,7 @@ func TestChatCompletionsCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *t if strings.Contains(historyText, "[file content end]") || strings.Contains(historyText, "[file content begin]") || strings.Contains(historyText, "[file name]:") { t.Fatalf("expected history transcript without file wrapper tags, got %s", historyText) } - if !strings.Contains(historyText, "# HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { + if !strings.Contains(historyText, "# DS2API_HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { t.Fatalf("expected history transcript to use numbered sections, got %s", historyText) } if !strings.Contains(historyText, "latest user turn") { @@ -549,7 +549,7 @@ func TestResponsesCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *testing t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls)) } historyText := string(ds.uploadCalls[0].Data) - if !strings.Contains(historyText, "# HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { + if !strings.Contains(historyText, "# DS2API_HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { t.Fatalf("expected uploaded history text to use numbered transcript format, got %s", historyText) } if ds.completionReq == nil { @@ -699,7 +699,7 @@ func TestCurrentInputFileWorksAcrossAutoDeleteModes(t *testing.T) { t.Fatalf("expected current input upload for mode=%s, got %d", mode, len(ds.uploadCalls)) } historyText := string(ds.uploadCalls[0].Data) - if !strings.Contains(historyText, "# HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { + if !strings.Contains(historyText, "# DS2API_HISTORY.txt") || !strings.Contains(historyText, "=== 1. SYSTEM ===") { t.Fatalf("expected uploaded history text to use numbered transcript format, got %s", historyText) } if ds.completionReq == nil { diff --git a/internal/promptcompat/history_transcript.go b/internal/promptcompat/history_transcript.go index 84a62cb..fbbfc4b 100644 --- a/internal/promptcompat/history_transcript.go +++ b/internal/promptcompat/history_transcript.go @@ -5,9 +5,9 @@ import ( "strings" ) -const CurrentInputContextFilename = "HISTORY.txt" +const CurrentInputContextFilename = "DS2API_HISTORY.txt" -const historyTranscriptTitle = "# HISTORY.txt" +const historyTranscriptTitle = "# DS2API_HISTORY.txt" const historyTranscriptSummary = "Prior conversation history and tool progress." 
func BuildOpenAIHistoryTranscript(messages []any) string { diff --git a/tests/raw_stream_samples/longtext-deepseek-v4-flash-20260429/upstream.stream.sse b/tests/raw_stream_samples/longtext-deepseek-v4-flash-20260429/upstream.stream.sse index 4048f8e..bc412db 100644 --- a/tests/raw_stream_samples/longtext-deepseek-v4-flash-20260429/upstream.stream.sse +++ b/tests/raw_stream_samples/longtext-deepseek-v4-flash-20260429/upstream.stream.sse @@ -1,4 +1,4 @@ -{"code":0,"msg":"","data":{"biz_code":0,"biz_msg":"","biz_data":{"id":"file-b10a2aca-39e9-4a38-be9d-9f22e398cb62","status":"PENDING","file_name":"history.txt","from_share":false,"file_size":732,"model_kind":"NORMAL","token_usage":null,"error_code":null,"inserted_at":1777485015.255,"updated_at":1777485015.255,"is_image":false,"audit_result":null}}} +{"code":0,"msg":"","data":{"biz_code":0,"biz_msg":"","biz_data":{"id":"file-b10a2aca-39e9-4a38-be9d-9f22e398cb62","status":"PENDING","file_name":"DS2API_HISTORY.txt","from_share":false,"file_size":732,"model_kind":"NORMAL","token_usage":null,"error_code":null,"inserted_at":1777485015.255,"updated_at":1777485015.255,"is_image":false,"audit_result":null}}} event: ready data: {"request_message_id":1,"response_message_id":2,"model_type":"default"} diff --git a/tests/raw_stream_samples/longtext-deepseek-v4-pro-20260429/upstream.stream.sse b/tests/raw_stream_samples/longtext-deepseek-v4-pro-20260429/upstream.stream.sse index 8b14272..72a3d07 100644 --- a/tests/raw_stream_samples/longtext-deepseek-v4-pro-20260429/upstream.stream.sse +++ b/tests/raw_stream_samples/longtext-deepseek-v4-pro-20260429/upstream.stream.sse @@ -1,4 +1,4 @@ -{"code":0,"msg":"","data":{"biz_code":0,"biz_msg":"","biz_data":{"id":"file-9c8ae986-75f7-4611-9956-5e1b502f3ec2","status":"SUCCESS","file_name":"history.txt","from_share":false,"file_size":732,"model_kind":"NORMAL","token_usage":145,"error_code":null,"inserted_at":1777485076.42,"updated_at":1777485076.42,"signed_path":"/file?file_id=9c8ae986-75f7-4611-9956-5e1b502f3ec2&state=a1REa2AdO8JmDuxMFiUTPJfpiyY4ie2weyUpYxfvEOrk5lxUCZifpRw9toZAEzn3DAjkgbR6blgZf41KLkHBKwwrcYTIjfxTRKijDqjEfguis03yddpuVrii6keG4%2BXIlcLAsyZG3qcGhfTGVZhsr%2BRl17J%2BcnT9roslhxBcEy4rthFJVMWUI%2BSHjuo2gLEUDfvMfULQ1gSLVGtr%2Fpq%2FcNPCPSxZapIQv04ZVmJLcdbzRkz%2Bb%2BxM5RWUIPujp%2B3ke1WDa3%2B6S4pP0Pv%2BAJ0MFUjQsloUwO4AsJ8YhGBFWg8Ehe1b2yt1N%2Fi%2BIjLRPt5xiNmALcJJXIY%3D","is_image":false,"audit_result":null}}} +{"code":0,"msg":"","data":{"biz_code":0,"biz_msg":"","biz_data":{"id":"file-9c8ae986-75f7-4611-9956-5e1b502f3ec2","status":"SUCCESS","file_name":"DS2API_HISTORY.txt","from_share":false,"file_size":732,"model_kind":"NORMAL","token_usage":145,"error_code":null,"inserted_at":1777485076.42,"updated_at":1777485076.42,"signed_path":"/file?file_id=9c8ae986-75f7-4611-9956-5e1b502f3ec2&state=a1REa2AdO8JmDuxMFiUTPJfpiyY4ie2weyUpYxfvEOrk5lxUCZifpRw9toZAEzn3DAjkgbR6blgZf41KLkHBKwwrcYTIjfxTRKijDqjEfguis03yddpuVrii6keG4%2BXIlcLAsyZG3qcGhfTGVZhsr%2BRl17J%2BcnT9roslhxBcEy4rthFJVMWUI%2BSHjuo2gLEUDfvMfULQ1gSLVGtr%2Fpq%2FcNPCPSxZapIQv04ZVmJLcdbzRkz%2Bb%2BxM5RWUIPujp%2B3ke1WDa3%2B6S4pP0Pv%2BAJ0MFUjQsloUwO4AsJ8YhGBFWg8Ehe1b2yt1N%2Fi%2BIjLRPt5xiNmALcJJXIY%3D","is_image":false,"audit_result":null}}} event: ready data: {"request_message_id":1,"response_message_id":2,"model_type":"expert"} diff --git a/webui/src/locales/en.json b/webui/src/locales/en.json index 072e514..0808398 100644 --- a/webui/src/locales/en.json +++ b/webui/src/locales/en.json @@ -394,7 +394,7 @@ "thinkingInjectionPromptHelp": "Leave empty to use the built-in 
default prompt shown as the input placeholder.", "currentInputFileTitle": "Independent Split", "currentInputFileEnabled": "Independent split (by size)", - "currentInputFileDesc": "Enabled by default. Once the character threshold is reached, upload the full context as a HISTORY.txt context file.", + "currentInputFileDesc": "Enabled by default. Once the character threshold is reached, upload the full context as a DS2API_HISTORY.txt context file.", "currentInputFileMinChars": "Current input threshold (characters)", "currentInputFileHelp": "Default is 0, which uses independent split for any non-empty input.", "compatibilityTitle": "Compatibility", @@ -485,4 +485,4 @@ "four": "Trigger a redeploy to apply the updated environment variables." } } -} +} \ No newline at end of file diff --git a/webui/src/locales/zh.json b/webui/src/locales/zh.json index 409f48d..b3b2460 100644 --- a/webui/src/locales/zh.json +++ b/webui/src/locales/zh.json @@ -394,7 +394,7 @@ "thinkingInjectionPromptHelp": "留空时使用内置默认提示词;默认内容会显示在输入框占位文本中。", "currentInputFileTitle": "独立拆分", "currentInputFileEnabled": "独立拆分(按量)", - "currentInputFileDesc": "默认开启。达到字符阈值后,将完整上下文上传为 HISTORY.txt 上下文文件。", + "currentInputFileDesc": "默认开启。达到字符阈值后,将完整上下文上传为 DS2API_HISTORY.txt 上下文文件。", "currentInputFileMinChars": "当前输入阈值(字符数)", "currentInputFileHelp": "默认 0,表示只要有输入就会使用独立拆分。", "compatibilityTitle": "兼容性设置", @@ -485,4 +485,4 @@ "four": "触发重新部署以应用新的环境变量。" } } -} +} \ No newline at end of file From dd5a0c52136a0fdfc2e76c8873cc2603ffc53f48 Mon Sep 17 00:00:00 2001 From: CJACK Date: Fri, 1 May 2026 22:27:59 +0800 Subject: [PATCH 06/15] refactor: update and standardize current input file continuation prompt instructions --- docs/prompt-compatibility.md | 4 +-- .../httpapi/openai/chat/chat_history_test.go | 6 ++-- .../openai/chat/vercel_prepare_test.go | 4 +-- .../openai/history/current_input_file.go | 4 +-- internal/httpapi/openai/history_split_test.go | 28 +++++++++---------- .../chatHistory/ChatHistoryContainer.jsx | 12 ++++++-- 6 files changed, 33 insertions(+), 25 deletions(-) diff --git a/docs/prompt-compatibility.md b/docs/prompt-compatibility.md index f3538ff..58c2c6c 100644 --- a/docs/prompt-compatibility.md +++ b/docs/prompt-compatibility.md @@ -249,7 +249,7 @@ OpenAI 文件相关实现: 兼容层现在只保留 `current_input_file` 这一种拆分方式;旧的 `history_split` 已废弃,只保留为兼容旧配置的字段,不再参与请求处理。 -- `current_input_file` 默认开启;它用于把“完整上下文”合并进 `DS2API_HISTORY.txt` 上下文文件。当最新 user turn 的纯文本长度达到 `current_input_file.min_chars`(默认 `0`)时,兼容层会上传一个文件名为 `DS2API_HISTORY.txt` 的上下文文件。文件内容会先做 OpenAI 消息标准化,再序列化成按轮次编号的 `DS2API_HISTORY.txt` 风格 transcript,带有 `# DS2API_HISTORY.txt` 标题和 `=== N. ROLE ===` 分段;live prompt 中则只保留一个中性的 user 消息要求模型直接回答最新请求,不再暴露文件名或要求模型读取本地文件。 +- `current_input_file` 默认开启;它用于把“完整上下文”合并进 `DS2API_HISTORY.txt` 上下文文件。当最新 user turn 的纯文本长度达到 `current_input_file.min_chars`(默认 `0`)时,兼容层会上传一个文件名为 `DS2API_HISTORY.txt` 的上下文文件。文件内容会先做 OpenAI 消息标准化,再序列化成按轮次编号的 `DS2API_HISTORY.txt` 风格 transcript,带有 `# DS2API_HISTORY.txt` 标题和 `=== N. ROLE ===` 分段;live prompt 中则会给出一个 continuation 语气的 user 消息,引导模型从 `DS2API_HISTORY.txt` 的最新状态继续推进,并直接回答最新请求,避免把任务拉回起点。 - 如果 `current_input_file.enabled=false`,请求会直接透传,不上传任何拆分上下文文件。 - 旧的 `history_split.enabled` / `history_split.trigger_after_turns` 会被读取进配置对象以保持兼容,但不会触发拆分上传,也不会影响 `current_input_file` 的默认开启。 - 即使触发 `current_input_file` 后 live prompt 被缩短,对客户端回包里的上下文 token 统计,仍会沿用**拆分前的完整 prompt 语义**做计数,而不是按缩短后的占位 prompt 计算;否则会把真实上下文显著算小。 @@ -332,7 +332,7 @@ Prior conversation history and tool progress. 
```json { - "prompt": "<|begin▁of▁sentence|><|System|>原 system / developer\n\nYou have access to these tools: ...<|end▁of▁instructions|><|User|>The current request and prior conversation context have already been provided. Answer the latest user request directly.<|Assistant|>", + "prompt": "<|begin▁of▁sentence|><|System|>原 system / developer\n\nYou have access to these tools: ...<|end▁of▁instructions|><|User|>Continue from the latest state in the attached DS2API_HISTORY.txt context. Treat it as the current working state and answer the latest user request directly.<|Assistant|>", "ref_file_ids": [ "file-current-input-ignore", "file-systemprompt", diff --git a/internal/httpapi/openai/chat/chat_history_test.go b/internal/httpapi/openai/chat/chat_history_test.go index 89bc02d..e0c47fc 100644 --- a/internal/httpapi/openai/chat/chat_history_test.go +++ b/internal/httpapi/openai/chat/chat_history_test.go @@ -318,9 +318,9 @@ func TestChatCompletionsCurrentInputFilePersistsNeutralPrompt(t *testing.T) { t.Fatalf("expected uploaded current input file to be persisted in history text") } if len(full.Messages) != 1 { - t.Fatalf("expected neutral prompt to be the only persisted message, got %#v", full.Messages) + t.Fatalf("expected continuation prompt to be the only persisted message, got %#v", full.Messages) } - if !strings.Contains(full.Messages[0].Content, "Answer the latest user request directly.") { - t.Fatalf("expected neutral prompt to be persisted, got %#v", full.Messages[0]) + if !strings.Contains(full.Messages[0].Content, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") { + t.Fatalf("expected continuation prompt to be persisted, got %#v", full.Messages[0]) } } diff --git a/internal/httpapi/openai/chat/vercel_prepare_test.go b/internal/httpapi/openai/chat/vercel_prepare_test.go index 59e62d9..b27be18 100644 --- a/internal/httpapi/openai/chat/vercel_prepare_test.go +++ b/internal/httpapi/openai/chat/vercel_prepare_test.go @@ -130,8 +130,8 @@ func TestHandleVercelStreamPrepareAppliesCurrentInputFile(t *testing.T) { t.Fatalf("expected payload object, got %#v", body["payload"]) } promptText, _ := payload["prompt"].(string) - if !strings.Contains(promptText, "Answer the latest user request directly.") { - t.Fatalf("expected neutral prompt, got %s", promptText) + if !strings.Contains(promptText, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") { + t.Fatalf("expected continuation prompt, got %s", promptText) } if strings.Contains(promptText, "first user turn") || strings.Contains(promptText, "latest user turn") { t.Fatalf("expected original turns hidden from prompt, got %s", promptText) diff --git a/internal/httpapi/openai/history/current_input_file.go b/internal/httpapi/openai/history/current_input_file.go index 10d5297..648331c 100644 --- a/internal/httpapi/openai/history/current_input_file.go +++ b/internal/httpapi/openai/history/current_input_file.go @@ -62,7 +62,7 @@ func (s Service) ApplyCurrentInputFile(ctx context.Context, a *auth.RequestAuth, stdReq.RefFileIDs = prependUniqueRefFileID(stdReq.RefFileIDs, fileID) stdReq.FinalPrompt, stdReq.ToolNames = promptcompat.BuildOpenAIPrompt(messages, stdReq.ToolsRaw, "", stdReq.ToolChoice, stdReq.Thinking) // Token accounting must reflect the actual downstream context: - // the uploaded DS2API_HISTORY.txt file content + the neutral live prompt. + // the uploaded DS2API_HISTORY.txt file content + the continuation live prompt. 
stdReq.PromptTokenText = fileText + "\n" + stdReq.FinalPrompt return stdReq, nil } @@ -87,5 +87,5 @@ func latestUserInputForFile(messages []any) (int, string) { } func currentInputFilePrompt() string { - return "The current request and prior conversation context have already been provided. Answer the latest user request directly." + return "Continue from the latest state in the attached DS2API_HISTORY.txt context. Treat it as the current working state and answer the latest user request directly." } diff --git a/internal/httpapi/openai/history_split_test.go b/internal/httpapi/openai/history_split_test.go index 27a3bf7..d223689 100644 --- a/internal/httpapi/openai/history_split_test.go +++ b/internal/httpapi/openai/history_split_test.go @@ -303,11 +303,11 @@ func TestApplyCurrentInputFileUploadsFirstTurnWithNumberedHistoryTranscript(t *t if strings.Contains(out.FinalPrompt, "first turn content that is long enough") { t.Fatalf("expected current input text to be replaced in live prompt, got %s", out.FinalPrompt) } - if strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "DS2API_HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { + if strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "Read that file") { t.Fatalf("expected live prompt not to instruct file reads, got %s", out.FinalPrompt) } - if !strings.Contains(out.FinalPrompt, "Answer the latest user request directly.") { - t.Fatalf("expected neutral continuation instruction in live prompt, got %s", out.FinalPrompt) + if !strings.Contains(out.FinalPrompt, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") { + t.Fatalf("expected continuation-oriented prompt in live prompt, got %s", out.FinalPrompt) } if len(out.RefFileIDs) != 1 || out.RefFileIDs[0] != "file-inline-1" { t.Fatalf("expected current input file id in ref_file_ids, got %#v", out.RefFileIDs) @@ -358,8 +358,8 @@ func TestApplyCurrentInputFilePreservesFullContextPromptForTokenCounting(t *test if !strings.Contains(out.PromptTokenText, "# DS2API_HISTORY.txt") || !strings.Contains(out.PromptTokenText, "=== 1. 
SYSTEM ===") { t.Fatalf("expected prompt token text to include numbered history transcript, got %q", out.PromptTokenText) } - if !strings.Contains(out.PromptTokenText, "Answer the latest user request directly.") { - t.Fatalf("expected prompt token text to also include neutral live prompt, got %q", out.PromptTokenText) + if !strings.Contains(out.PromptTokenText, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") { + t.Fatalf("expected prompt token text to also include continuation prompt, got %q", out.PromptTokenText) } if strings.Contains(out.FinalPrompt, "first user turn") || strings.Contains(out.FinalPrompt, "latest user turn") { t.Fatalf("expected live prompt to hide original turns, got %q", out.FinalPrompt) @@ -406,11 +406,11 @@ func TestApplyCurrentInputFileUploadsFullContextFile(t *testing.T) { t.Fatalf("expected full context file to contain %q, got %q", want, uploadedText) } } - if strings.Contains(out.FinalPrompt, "first user turn") || strings.Contains(out.FinalPrompt, "latest user turn") || strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "DS2API_HISTORY.txt") || strings.Contains(out.FinalPrompt, "DS2API_HISTORY.txt") || strings.Contains(out.FinalPrompt, "Read that file") { - t.Fatalf("expected live prompt to use only a neutral continuation instruction, got %s", out.FinalPrompt) + if strings.Contains(out.FinalPrompt, "first user turn") || strings.Contains(out.FinalPrompt, "latest user turn") || strings.Contains(out.FinalPrompt, "CURRENT_USER_INPUT.txt") || strings.Contains(out.FinalPrompt, "Read that file") { + t.Fatalf("expected live prompt to use only a continuation instruction, got %s", out.FinalPrompt) } - if !strings.Contains(out.FinalPrompt, "Answer the latest user request directly.") { - t.Fatalf("expected neutral continuation instruction in live prompt, got %s", out.FinalPrompt) + if !strings.Contains(out.FinalPrompt, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") { + t.Fatalf("expected continuation-oriented prompt in live prompt, got %s", out.FinalPrompt) } } @@ -496,8 +496,8 @@ func TestChatCompletionsCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *t t.Fatal("expected completion payload to be captured") } promptText, _ := ds.completionReq["prompt"].(string) - if !strings.Contains(promptText, "Answer the latest user request directly.") { - t.Fatalf("expected neutral completion prompt, got %s", promptText) + if !strings.Contains(promptText, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") { + t.Fatalf("expected continuation-oriented prompt, got %s", promptText) } if strings.Contains(promptText, "first user turn") || strings.Contains(promptText, "latest user turn") { t.Fatalf("expected prompt to hide original turns, got %s", promptText) @@ -556,8 +556,8 @@ func TestResponsesCurrentInputFileUploadsContextAndKeepsNeutralPrompt(t *testing t.Fatal("expected completion payload to be captured") } promptText, _ := ds.completionReq["prompt"].(string) - if !strings.Contains(promptText, "Answer the latest user request directly.") { - t.Fatalf("expected neutral completion prompt, got %s", promptText) + if !strings.Contains(promptText, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") { + t.Fatalf("expected continuation-oriented prompt, got %s", promptText) } if strings.Contains(promptText, "first user turn") || strings.Contains(promptText, "latest user turn") { t.Fatalf("expected prompt to hide original turns, got 
%s", promptText) @@ -706,7 +706,7 @@ func TestCurrentInputFileWorksAcrossAutoDeleteModes(t *testing.T) { t.Fatalf("expected completion payload for mode=%s", mode) } promptText, _ := ds.completionReq["prompt"].(string) - if !strings.Contains(promptText, "Answer the latest user request directly.") || strings.Contains(promptText, "first user turn") || strings.Contains(promptText, "latest user turn") { + if !strings.Contains(promptText, "Continue from the latest state in the attached DS2API_HISTORY.txt context.") || strings.Contains(promptText, "first user turn") || strings.Contains(promptText, "latest user turn") { t.Fatalf("unexpected prompt for mode=%s: %s", mode, promptText) } }) diff --git a/webui/src/features/chatHistory/ChatHistoryContainer.jsx b/webui/src/features/chatHistory/ChatHistoryContainer.jsx index e0b574f..c0dc98c 100644 --- a/webui/src/features/chatHistory/ChatHistoryContainer.jsx +++ b/webui/src/features/chatHistory/ChatHistoryContainer.jsx @@ -16,7 +16,15 @@ const TOOL_MARKER = '<|Tool|>' const END_INSTRUCTIONS_MARKER = '<|end▁of▁instructions|>' const END_SENTENCE_MARKER = '<|end▁of▁sentence|>' const END_TOOL_RESULTS_MARKER = '<|end▁of▁toolresults|>' -const CURRENT_INPUT_FILE_PROMPT = 'The current request and prior conversation context have already been provided. Answer the latest user request directly.' +const CURRENT_INPUT_FILE_PROMPT = 'Continue from the latest state in the attached DS2API_HISTORY.txt context. Treat it as the current working state and answer the latest user request directly.' +const LEGACY_CURRENT_INPUT_FILE_PROMPTS = new Set([ + 'The current request and prior conversation context have already been provided. Answer the latest user request directly.', +]) + +function isCurrentInputFilePrompt(value) { + const text = String(value || '').trim() + return text === CURRENT_INPUT_FILE_PROMPT || LEGACY_CURRENT_INPUT_FILE_PROMPTS.has(text) +} function formatDateTime(value, lang) { if (!value) return '-' @@ -312,7 +320,7 @@ function buildListModeMessages(item, t) { const placeholderOnly = liveMessages.length === 1 && String(liveMessages[0]?.role || '').trim().toLowerCase() === 'user' - && String(liveMessages[0]?.content || '').trim() === CURRENT_INPUT_FILE_PROMPT + && isCurrentInputFilePrompt(liveMessages[0]?.content) if (placeholderOnly) { return { messages: historyMessages, historyMerged: true } From 0bca6e2cee713b16896c587a07ae7dae62cf681b Mon Sep 17 00:00:00 2001 From: CJACK Date: Fri, 1 May 2026 23:17:58 +0800 Subject: [PATCH 07/15] feat: implement context cancellation handling for chat and response stream runtimes to ensure clean termination without retries --- .../openai/chat/chat_stream_runtime.go | 9 ++ .../openai/chat/empty_retry_runtime.go | 7 +- .../openai/chat/empty_retry_runtime_test.go | 85 +++++++++++++++++++ .../openai/responses/empty_retry_runtime.go | 9 +- .../responses/empty_retry_runtime_test.go | 70 +++++++++++++++ .../responses_stream_runtime_core.go | 7 ++ 6 files changed, 185 insertions(+), 2 deletions(-) create mode 100644 internal/httpapi/openai/chat/empty_retry_runtime_test.go create mode 100644 internal/httpapi/openai/responses/empty_retry_runtime_test.go diff --git a/internal/httpapi/openai/chat/chat_stream_runtime.go b/internal/httpapi/openai/chat/chat_stream_runtime.go index 17ff0d5..a9270a1 100644 --- a/internal/httpapi/openai/chat/chat_stream_runtime.go +++ b/internal/httpapi/openai/chat/chat_stream_runtime.go @@ -173,6 +173,15 @@ func (s *chatStreamRuntime) sendFailedChunk(status int, message, code string) { s.sendDone() } +func 
(s *chatStreamRuntime) markContextCancelled() { + s.finalErrorStatus = 499 + s.finalErrorMessage = "request context cancelled" + s.finalErrorCode = string(streamengine.StopReasonContextCancelled) + s.finalThinking = s.thinking.String() + s.finalText = cleanVisibleOutput(s.text.String(), s.stripReferenceMarkers) + s.finalFinishReason = string(streamengine.StopReasonContextCancelled) +} + func (s *chatStreamRuntime) resetStreamToolCallState() { s.streamToolCallIDs = map[int]string{} s.streamToolNames = map[int]string{} diff --git a/internal/httpapi/openai/chat/empty_retry_runtime.go b/internal/httpapi/openai/chat/empty_retry_runtime.go index 147024f..464dd2c 100644 --- a/internal/httpapi/openai/chat/empty_retry_runtime.go +++ b/internal/httpapi/openai/chat/empty_retry_runtime.go @@ -247,12 +247,13 @@ func (h *Handler) consumeChatStreamAttempt(r *http.Request, resp *http.Response, } }, OnContextDone: func() { + streamRuntime.markContextCancelled() if historySession != nil { historySession.stopped(streamRuntime.thinking.String(), streamRuntime.text.String(), string(streamengine.StopReasonContextCancelled)) } }, }) - if r.Context().Err() != nil { + if streamRuntime.finalErrorCode == string(streamengine.StopReasonContextCancelled) { return true, false } terminalWritten := streamRuntime.finalize(finalReason, allowDeferEmpty && finalReason != "content_filter") @@ -286,6 +287,10 @@ func logChatStreamTerminal(streamRuntime *chatStreamRuntime, attempts int) { if attempts > 0 { source = "synthetic_retry" } + if streamRuntime.finalErrorCode == string(streamengine.StopReasonContextCancelled) { + config.Logger.Info("[openai_empty_retry] terminal cancelled", "surface", "chat.completions", "stream", true, "retry_attempts", attempts, "error_code", streamRuntime.finalErrorCode) + return + } if streamRuntime.finalErrorMessage != "" { config.Logger.Info("[openai_empty_retry] terminal empty output", "surface", "chat.completions", "stream", true, "retry_attempts", attempts, "success_source", "none", "error_code", streamRuntime.finalErrorCode) return diff --git a/internal/httpapi/openai/chat/empty_retry_runtime_test.go b/internal/httpapi/openai/chat/empty_retry_runtime_test.go new file mode 100644 index 0000000..ff8155f --- /dev/null +++ b/internal/httpapi/openai/chat/empty_retry_runtime_test.go @@ -0,0 +1,85 @@ +package chat + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "ds2api/internal/chathistory" + "ds2api/internal/stream" +) + +func TestConsumeChatStreamAttemptMarksContextCancelledState(t *testing.T) { + historyStore := newTestChatHistoryStore(t) + entry, err := historyStore.Start(chathistory.StartParams{ + CallerID: "caller:test", + Model: "deepseek-v4-flash", + Stream: true, + UserInput: "hello", + }) + if err != nil { + t.Fatalf("start history failed: %v", err) + } + session := &chatHistorySession{ + store: historyStore, + entryID: entry.ID, + startedAt: time.Now(), + lastPersist: time.Now(), + finalPrompt: "prompt", + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", nil).WithContext(ctx) + rec := httptest.NewRecorder() + streamRuntime := newChatStreamRuntime( + rec, + http.NewResponseController(rec), + true, + "cid-cancelled", + time.Now().Unix(), + "deepseek-v4-flash", + "prompt", + false, + false, + true, + nil, + nil, + false, + false, + ) + resp := makeOpenAISSEHTTPResponse( + `data: {"p":"response/content","v":"hello"}`, + `data: [DONE]`, + ) + + h := &Handler{} + 
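+	// The request context is already cancelled, so ConsumeSSE should fire OnContextDone,
+	// mark the runtime cancelled, and end the attempt without scheduling a retry.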
terminalWritten, retryable := h.consumeChatStreamAttempt(req, resp, streamRuntime, "text", false, session, true) + if !terminalWritten || retryable { + t.Fatalf("expected cancelled attempt to terminate without retry, got terminalWritten=%v retryable=%v", terminalWritten, retryable) + } + if got, want := streamRuntime.finalErrorCode, string(stream.StopReasonContextCancelled); got != want { + t.Fatalf("expected cancelled final error code %q, got %q", want, got) + } + if streamRuntime.finalErrorMessage == "" { + t.Fatalf("expected cancelled final error message to be preserved") + } + + snapshot, err := historyStore.Snapshot() + if err != nil { + t.Fatalf("snapshot failed: %v", err) + } + if len(snapshot.Items) != 1 { + t.Fatalf("expected one history item, got %d", len(snapshot.Items)) + } + full, err := historyStore.Get(snapshot.Items[0].ID) + if err != nil { + t.Fatalf("get detail failed: %v", err) + } + if full.Status != "stopped" { + t.Fatalf("expected stopped status, got %#v", full) + } +} diff --git a/internal/httpapi/openai/responses/empty_retry_runtime.go b/internal/httpapi/openai/responses/empty_retry_runtime.go index 45d861d..25131e1 100644 --- a/internal/httpapi/openai/responses/empty_retry_runtime.go +++ b/internal/httpapi/openai/responses/empty_retry_runtime.go @@ -222,8 +222,11 @@ func (h *Handler) consumeResponsesStreamAttempt(r *http.Request, resp *http.Resp finalReason = "content_filter" } }, + OnContextDone: func() { + streamRuntime.markContextCancelled() + }, }) - if r.Context().Err() != nil { + if streamRuntime.finalErrorCode == string(streamengine.StopReasonContextCancelled) { return true, false } terminalWritten := streamRuntime.finalize(finalReason, allowDeferEmpty && finalReason != "content_filter") @@ -238,6 +241,10 @@ func logResponsesStreamTerminal(streamRuntime *responsesStreamRuntime, attempts if attempts > 0 { source = "synthetic_retry" } + if streamRuntime.finalErrorCode == string(streamengine.StopReasonContextCancelled) { + config.Logger.Info("[openai_empty_retry] terminal cancelled", "surface", "responses", "stream", true, "retry_attempts", attempts, "error_code", streamRuntime.finalErrorCode) + return + } if streamRuntime.failed { config.Logger.Info("[openai_empty_retry] terminal empty output", "surface", "responses", "stream", true, "retry_attempts", attempts, "success_source", "none", "error_code", streamRuntime.finalErrorCode) return diff --git a/internal/httpapi/openai/responses/empty_retry_runtime_test.go b/internal/httpapi/openai/responses/empty_retry_runtime_test.go new file mode 100644 index 0000000..c40e983 --- /dev/null +++ b/internal/httpapi/openai/responses/empty_retry_runtime_test.go @@ -0,0 +1,70 @@ +package responses + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "ds2api/internal/promptcompat" + "ds2api/internal/stream" +) + +func makeResponsesOpenAISSEHTTPResponse(lines ...string) *http.Response { + body := strings.Join(lines, "\n") + if !strings.HasSuffix(body, "\n") { + body += "\n" + } + return &http.Response{ + StatusCode: http.StatusOK, + Header: make(http.Header), + Body: io.NopCloser(strings.NewReader(body)), + } +} + +func TestConsumeResponsesStreamAttemptMarksContextCancelledState(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + req := httptest.NewRequest(http.MethodPost, "/v1/responses", nil).WithContext(ctx) + rec := httptest.NewRecorder() + streamRuntime := newResponsesStreamRuntime( + rec, + http.NewResponseController(rec), + true, + 
"resp-cancelled", + "deepseek-v4-flash", + "prompt", + false, + false, + true, + nil, + nil, + false, + false, + promptcompat.DefaultToolChoicePolicy(), + "", + nil, + ) + resp := makeResponsesOpenAISSEHTTPResponse( + `data: {"p":"response/content","v":"hello"}`, + `data: [DONE]`, + ) + + h := &Handler{} + terminalWritten, retryable := h.consumeResponsesStreamAttempt(req, resp, streamRuntime, "text", false, true) + if !terminalWritten || retryable { + t.Fatalf("expected cancelled attempt to terminate without retry, got terminalWritten=%v retryable=%v", terminalWritten, retryable) + } + if !streamRuntime.failed { + t.Fatalf("expected cancelled response stream to be marked failed") + } + if got, want := streamRuntime.finalErrorCode, string(stream.StopReasonContextCancelled); got != want { + t.Fatalf("expected cancelled final error code %q, got %q", want, got) + } + if streamRuntime.finalErrorMessage == "" { + t.Fatalf("expected cancelled final error message to be preserved") + } +} diff --git a/internal/httpapi/openai/responses/responses_stream_runtime_core.go b/internal/httpapi/openai/responses/responses_stream_runtime_core.go index 7184c3f..a4749c0 100644 --- a/internal/httpapi/openai/responses/responses_stream_runtime_core.go +++ b/internal/httpapi/openai/responses/responses_stream_runtime_core.go @@ -139,6 +139,13 @@ func (s *responsesStreamRuntime) failResponse(status int, message, code string) s.sendDone() } +func (s *responsesStreamRuntime) markContextCancelled() { + s.failed = true + s.finalErrorStatus = 499 + s.finalErrorMessage = "request context cancelled" + s.finalErrorCode = string(streamengine.StopReasonContextCancelled) +} + func (s *responsesStreamRuntime) finalize(finishReason string, deferEmptyOutput bool) bool { s.failed = false s.finalErrorStatus = 0 From 76ee2faa12377cf16ad37365bc251fa36dcc5aba Mon Sep 17 00:00:00 2001 From: CJACK Date: Fri, 1 May 2026 23:44:07 +0800 Subject: [PATCH 08/15] chore: bump version to 4.2.2 and update documentation to reflect improved release workflows, CI dependencies, and project structure --- README.MD | 8 ++++---- README.en.md | 8 ++++---- VERSION | 2 +- docs/CONTRIBUTING.en.md | 2 +- docs/CONTRIBUTING.md | 2 +- docs/DEPLOY.en.md | 6 +++--- docs/DEPLOY.md | 6 +++--- docs/README.md | 4 ++-- 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/README.MD b/README.MD index ebb8a70..a596f95 100644 --- a/README.MD +++ b/README.MD @@ -17,7 +17,7 @@ 语言 / Language: [中文](README.MD) | [English](README.en.md) -将 DeepSeek Web 对话能力转换为 OpenAI、Claude 与 Gemini 兼容 API。后端为 **Go 全量实现**,前端为 React WebUI 管理台(源码在 `webui/`,部署时自动构建到 `static/admin`)。 +将 DeepSeek Web 对话能力转换为 OpenAI、Claude 与 Gemini 兼容 API。核心后端以 **Go** 实现,Vercel 流式桥接额外使用少量 Node Runtime,前端为 React WebUI 管理台(源码在 `webui/`,部署时自动构建到 `static/admin`)。 文档入口:[文档导航](docs/README.md) / [架构说明](docs/ARCHITECTURE.md) / [接口文档](API.md) @@ -424,10 +424,10 @@ npm run build --prefix webui 工作流文件:`.github/workflows/release-artifacts.yml` -- **触发条件**:仅在 GitHub Release `published` 时触发(普通 push 不会触发) -- **构建产物**:多平台二进制包(`linux/amd64`、`linux/arm64`、`linux/armv7`、`darwin/amd64`、`darwin/arm64`、`windows/amd64`、`windows/arm64`)+ `sha256sums.txt` +- **触发条件**:默认仅在 GitHub Release `published` 时自动触发;也支持在 Actions 页面手动 `workflow_dispatch`,并填写 `release_tag` 复跑/补发 +- **构建产物**:多平台二进制包(`linux/amd64`、`linux/arm64`、`linux/armv7`、`darwin/amd64`、`darwin/arm64`、`windows/amd64`、`windows/arm64`)、Linux Docker 镜像导出包 + `sha256sums.txt` - **容器镜像发布**:仅推送到 GHCR(`ghcr.io/cjackhwang/ds2api`) -- **每个压缩包包含**:`ds2api` 可执行文件、`static/admin`、WASM 
文件(同时支持内置 fallback)、`config.example.json` 配置示例、README、LICENSE +- **每个二进制压缩包包含**:`ds2api` 可执行文件、`static/admin`、`config.example.json`、`.env.example`、`README.MD`、`README.en.md`、`LICENSE` ## 免责声明 diff --git a/README.en.md b/README.en.md index 267f7b1..773d96f 100644 --- a/README.en.md +++ b/README.en.md @@ -16,7 +16,7 @@ Language: [中文](README.MD) | [English](README.en.md) -DS2API converts DeepSeek Web chat capability into OpenAI-compatible, Claude-compatible, and Gemini-compatible APIs. The backend is a **pure Go implementation**, with a React WebUI admin panel (source in `webui/`, build output auto-generated to `static/admin` during deployment). +DS2API converts DeepSeek Web chat capability into OpenAI-compatible, Claude-compatible, and Gemini-compatible APIs. The core backend is Go-based, with a small Node Runtime bridge used for Vercel streaming, and the React WebUI admin panel lives in `webui/` (build output auto-generated to `static/admin` during deployment). Documentation entry: [Docs Index](docs/README.md) / [Architecture](docs/ARCHITECTURE.en.md) / [API Reference](API.en.md) @@ -409,10 +409,10 @@ npm run build --prefix webui Workflow: `.github/workflows/release-artifacts.yml` -- **Trigger**: only on GitHub Release `published` (normal pushes do not trigger builds) -- **Outputs**: multi-platform archives (`linux/amd64`, `linux/arm64`, `linux/armv7`, `darwin/amd64`, `darwin/arm64`, `windows/amd64`, `windows/arm64`) + `sha256sums.txt` +- **Trigger**: by default only on GitHub Release `published`; you can also run it manually via `workflow_dispatch` and pass `release_tag` to rerun / backfill +- **Outputs**: multi-platform binary archives (`linux/amd64`, `linux/arm64`, `linux/armv7`, `darwin/amd64`, `darwin/arm64`, `windows/amd64`, `windows/arm64`), Linux Docker image export tarballs, and `sha256sums.txt` - **Container publishing**: GHCR only (`ghcr.io/cjackhwang/ds2api`) -- **Each archive includes**: `ds2api` executable, `static/admin`, WASM file (with embedded fallback support), `config.example.json`-based config template, README, LICENSE +- **Each binary archive includes**: the `ds2api` executable, `static/admin`, `config.example.json`, `.env.example`, `README.MD`, `README.en.md`, and `LICENSE` ## Disclaimer diff --git a/VERSION b/VERSION index fae6e3d..af8c8ec 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -4.2.1 +4.2.2 diff --git a/docs/CONTRIBUTING.en.md b/docs/CONTRIBUTING.en.md index 8dd9a40..94cade1 100644 --- a/docs/CONTRIBUTING.en.md +++ b/docs/CONTRIBUTING.en.md @@ -36,7 +36,7 @@ go run ./cmd/ds2api cd webui # 2. Install dependencies -npm install +npm ci # 3. Start dev server (hot reload) npm run dev diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 0a9187d..69424d4 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -36,7 +36,7 @@ go run ./cmd/ds2api cd webui # 2. 安装依赖 -npm install +npm ci # 3. 
启动开发服务器(热更新) npm run dev diff --git a/docs/DEPLOY.en.md b/docs/DEPLOY.en.md index b2ed75e..7390097 100644 --- a/docs/DEPLOY.en.md +++ b/docs/DEPLOY.en.md @@ -64,8 +64,8 @@ Use `config.json` as the single source of truth: Built-in GitHub Actions workflow: `.github/workflows/release-artifacts.yml` -- **Trigger**: only on Release `published` (no build on normal push) -- **Outputs**: multi-platform binary archives + `sha256sums.txt` +- **Trigger**: by default only on Release `published`; you can also run it manually via `workflow_dispatch` and pass `release_tag` to rerun / backfill +- **Outputs**: multi-platform binary archives, Linux Docker image export tarballs, and `sha256sums.txt` - **Container publishing**: GHCR only (`ghcr.io/cjackhwang/ds2api`) | Platform | Architecture | Format | @@ -419,7 +419,7 @@ Or step by step: ```bash cd webui -npm install +npm ci npm run build # Output goes to static/admin/ ``` diff --git a/docs/DEPLOY.md b/docs/DEPLOY.md index 4a1b75f..29e3d0f 100644 --- a/docs/DEPLOY.md +++ b/docs/DEPLOY.md @@ -64,8 +64,8 @@ cp config.example.json config.json 仓库内置 GitHub Actions 工作流:`.github/workflows/release-artifacts.yml` -- **触发条件**:仅在 Release `published` 时触发(普通 push 不会构建) -- **构建产物**:多平台二进制压缩包 + `sha256sums.txt` +- **触发条件**:默认仅在 Release `published` 时自动触发;也支持在 Actions 页面手动 `workflow_dispatch`,并填写 `release_tag` 复跑/补发 +- **构建产物**:多平台二进制压缩包、Linux Docker 镜像导出包 + `sha256sums.txt` - **容器镜像发布**:仅发布到 GHCR(`ghcr.io/cjackhwang/ds2api`) | 平台 | 架构 | 文件格式 | @@ -429,7 +429,7 @@ go run ./cmd/ds2api ```bash cd webui -npm install +npm ci npm run build # 产物输出到 static/admin/ ``` diff --git a/docs/README.md b/docs/README.md index b3556eb..426c343 100644 --- a/docs/README.md +++ b/docs/README.md @@ -22,7 +22,7 @@ ### 文档维护约定 -- 文档更新必须以实际代码实现为依据:总路由装配看 `internal/server/router.go`,协议/resource 路由看 `internal/httpapi/*/**/routes.go` 与 `internal/httpapi/admin/handler.go`,配置默认值看 `internal/config/*`,模型/alias 看 `internal/config/models.go`,prompt 兼容链路看 `docs/prompt-compatibility.md` 列出的代码入口。 +- 文档更新必须以实际代码实现为依据:总路由装配看 `internal/server/router.go`,协议/resource 路由看 `internal/httpapi/**/handler*.go` 与 `internal/httpapi/admin/handler.go`,配置默认值看 `internal/config/*`,模型/alias 看 `internal/config/models.go`,prompt 兼容链路看 `docs/prompt-compatibility.md` 列出的代码入口。 - `README.MD` / `README.en.md`:面向首次接触用户,保留“是什么 + 怎么快速跑起来”。 - `docs/ARCHITECTURE*.md`:面向开发者,集中维护项目结构、模块职责与调用链。 - `API*.md`:面向客户端接入者,聚焦接口行为、鉴权和示例。 @@ -53,7 +53,7 @@ Recommended reading order: ### Maintenance conventions -- Documentation updates must be grounded in the actual implementation: root routing lives in `internal/server/router.go`, protocol/resource routes live in `internal/httpapi/*/**/routes.go` and `internal/httpapi/admin/handler.go`, config defaults in `internal/config/*`, models/aliases in `internal/config/models.go`, and the prompt compatibility pipeline in the code entrypoints listed by `docs/prompt-compatibility.md`. +- Documentation updates must be grounded in the actual implementation: root routing lives in `internal/server/router.go`, protocol/resource routes live in `internal/httpapi/**/handler*.go` and `internal/httpapi/admin/handler.go`, config defaults in `internal/config/*`, models/aliases in `internal/config/models.go`, and the prompt compatibility pipeline in the code entrypoints listed by `docs/prompt-compatibility.md`. - `README.MD` / `README.en.md`: onboarding-oriented (“what + quick start”). - `docs/ARCHITECTURE*.md`: developer-oriented source of truth for module boundaries and execution flow. 
- `API*.md`: integration-oriented behavior/contracts. From 55abf647179ee5d57f621ad942ebfb9d976cdd98 Mon Sep 17 00:00:00 2001 From: CJACK Date: Sat, 2 May 2026 00:55:17 +0800 Subject: [PATCH 09/15] feat: add model type support for file uploads with automatic resolution and header propagation --- docs/prompt-compatibility.md | 8 +++++ internal/deepseek/client/client_upload.go | 8 +++++ .../deepseek/client/client_upload_test.go | 6 ++++ .../httpapi/openai/file_inline_upload_test.go | 13 +++++++-- .../openai/files/file_inline_upload.go | 12 ++++++++ .../httpapi/openai/files/handler_files.go | 29 +++++++++++++++++++ internal/httpapi/openai/files_route_test.go | 14 +++++++-- .../openai/history/current_input_file.go | 6 ++++ internal/httpapi/openai/history_split_test.go | 9 ++++-- .../features/apiTester/ApiTesterContainer.jsx | 1 + webui/src/features/apiTester/ChatPanel.jsx | 8 ++++- 11 files changed, 105 insertions(+), 9 deletions(-) diff --git a/docs/prompt-compatibility.md b/docs/prompt-compatibility.md index 58c2c6c..fcc70a5 100644 --- a/docs/prompt-compatibility.md +++ b/docs/prompt-compatibility.md @@ -238,6 +238,14 @@ OpenAI 文件相关实现: - 文件 ID 收集: [internal/promptcompat/file_refs.go](../internal/promptcompat/file_refs.go) +OpenAI 的文件上传现在不再是“只传文件本体”的通用路径,而是会先根据请求里的 `model` 解析出 DeepSeek 的上传类型,并把它透传到上传接口的 `x-model-type`。当前可见的上传类型就是 `default` / `expert` / `vision`,其中 vision 请求上传图片时必须带上 `vision`,否则下游容易退回到仅文本或 OCR 语义。这个模型类型会同时用于: + +- `/v1/files` 这类独立文件上传入口 +- Chat / Responses 的 inline 图片、附件上传 +- current input file 触发时生成的 `DS2API_HISTORY.txt` 上下文文件 + +也就是说,文件上传和完成请求的 `model_type` 现在是一致的:完成 payload 里仍然是 `model_type`,上传文件则会在 DeepSeek 上传阶段携带同样的模型类型信息。 + 结论: - “systemprompt 文字”在 prompt 里 diff --git a/internal/deepseek/client/client_upload.go b/internal/deepseek/client/client_upload.go index 9e95a23..c3334c3 100644 --- a/internal/deepseek/client/client_upload.go +++ b/internal/deepseek/client/client_upload.go @@ -23,6 +23,7 @@ type UploadFileRequest struct { Filename string ContentType string Purpose string + ModelType string Data []byte } @@ -54,6 +55,7 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload contentType = "application/octet-stream" } purpose := strings.TrimSpace(req.Purpose) + modelType := strings.ToLower(strings.TrimSpace(req.ModelType)) body, contentTypeHeader, err := buildUploadMultipartBody(filename, contentType, req.Data) if err != nil { return nil, err @@ -64,6 +66,9 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload "purpose": purpose, "bytes": len(req.Data), } + if modelType != "" { + capturePayload["model_type"] = modelType + } captureSession := c.capture.Start("deepseek_upload_file", dsprotocol.DeepSeekUploadFileURL, a.AccountID, capturePayload) attempts := 0 refreshed := false @@ -81,6 +86,9 @@ func (c *Client) UploadFile(ctx context.Context, a *auth.RequestAuth, req Upload } headers := c.authHeaders(a.DeepSeekToken) headers["Content-Type"] = contentTypeHeader + if modelType != "" { + headers["x-model-type"] = modelType + } headers["x-ds-pow-response"] = powHeader headers["x-file-size"] = strconv.Itoa(len(req.Data)) headers["x-thinking-enabled"] = "1" diff --git a/internal/deepseek/client/client_upload_test.go b/internal/deepseek/client/client_upload_test.go index 90e11cd..e7d1cc0 100644 --- a/internal/deepseek/client/client_upload_test.go +++ b/internal/deepseek/client/client_upload_test.go @@ -82,6 +82,7 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) { var seenTargetPath string var 
seenContentType string var seenFileSize string + var seenModelType string var seenBody string call := 0 client := &Client{ @@ -96,6 +97,7 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) { seenPow = req.Header.Get("x-ds-pow-response") seenContentType = req.Header.Get("Content-Type") seenFileSize = req.Header.Get("x-file-size") + seenModelType = req.Header.Get("x-model-type") seenBody = string(bodyBytes) return &http.Response{StatusCode: http.StatusOK, Header: make(http.Header), Body: io.NopCloser(strings.NewReader(uploadResponse)), Request: req}, nil default: @@ -112,6 +114,7 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) { Filename: "demo.txt", ContentType: "text/plain", Purpose: "assistants", + ModelType: "vision", Data: []byte("hello"), }, 1) if err != nil { @@ -140,6 +143,9 @@ func TestUploadFileUsesUploadTargetPowAndMultipartHeaders(t *testing.T) { if seenFileSize != "5" { t.Fatalf("expected x-file-size=5, got %q", seenFileSize) } + if seenModelType != "vision" { + t.Fatalf("expected x-model-type=vision, got %q", seenModelType) + } if !strings.HasPrefix(seenContentType, "multipart/form-data; boundary=") { t.Fatalf("expected multipart content type, got %q", seenContentType) } diff --git a/internal/httpapi/openai/file_inline_upload_test.go b/internal/httpapi/openai/file_inline_upload_test.go index fa399b8..8194aeb 100644 --- a/internal/httpapi/openai/file_inline_upload_test.go +++ b/internal/httpapi/openai/file_inline_upload_test.go @@ -94,6 +94,9 @@ func TestPreprocessInlineFileInputsReplacesDataURLAndCollectsRefFileIDs(t *testi if len(ds.uploadCalls) != 1 { t.Fatalf("expected 1 upload, got %d", len(ds.uploadCalls)) } + if ds.uploadCalls[0].ModelType != "default" { + t.Fatalf("expected default model type when request omits model, got %q", ds.uploadCalls[0].ModelType) + } if ds.lastCtx != ctx { t.Fatalf("expected upload to use request context") } @@ -149,7 +152,7 @@ func TestPreprocessInlineFileInputsDeduplicatesIdenticalPayloads(t *testing.T) { func TestChatCompletionsUploadsInlineFilesBeforeCompletion(t *testing.T) { ds := &inlineUploadDSStub{} h := &openAITestSurface{Store: mockOpenAIConfig{wideInput: true}, Auth: streamStatusAuthStub{}, DS: ds} - reqBody := `{"model":"deepseek-v4-flash","messages":[{"role":"user","content":[{"type":"input_text","text":"hi"},{"type":"image_url","image_url":{"url":"data:image/png;base64,QUJDRA=="}}]}],"stream":false}` + reqBody := `{"model":"deepseek-v4-vision","messages":[{"role":"user","content":[{"type":"input_text","text":"hi"},{"type":"image_url","image_url":{"url":"data:image/png;base64,QUJDRA=="}}]}],"stream":false}` req := httptest.NewRequest(http.MethodPost, "/v1/chat/completions", strings.NewReader(reqBody)) req.Header.Set("Authorization", "Bearer direct-token") req.Header.Set("Content-Type", "application/json") @@ -163,6 +166,9 @@ func TestChatCompletionsUploadsInlineFilesBeforeCompletion(t *testing.T) { if len(ds.uploadCalls) != 1 { t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls)) } + if ds.uploadCalls[0].ModelType != "vision" { + t.Fatalf("expected vision model type for vision request, got %q", ds.uploadCalls[0].ModelType) + } if ds.completionReq == nil { t.Fatal("expected completion payload to be captured") } @@ -177,7 +183,7 @@ func TestResponsesUploadsInlineFilesBeforeCompletion(t *testing.T) { h := &openAITestSurface{Store: mockOpenAIConfig{wideInput: true}, Auth: streamStatusAuthStub{}, DS: ds} r := chi.NewRouter() registerOpenAITestRoutes(r, h) - reqBody := 
`{"model":"deepseek-v4-flash","input":[{"role":"user","content":[{"type":"input_text","text":"hi"},{"type":"input_image","image_url":{"url":"data:image/png;base64,QUJDRA=="}}]}],"stream":false}` + reqBody := `{"model":"deepseek-v4-pro","input":[{"role":"user","content":[{"type":"input_text","text":"hi"},{"type":"input_image","image_url":{"url":"data:image/png;base64,QUJDRA=="}}]}],"stream":false}` req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(reqBody)) req.Header.Set("Authorization", "Bearer direct-token") req.Header.Set("Content-Type", "application/json") @@ -191,6 +197,9 @@ func TestResponsesUploadsInlineFilesBeforeCompletion(t *testing.T) { if len(ds.uploadCalls) != 1 { t.Fatalf("expected 1 upload call, got %d", len(ds.uploadCalls)) } + if ds.uploadCalls[0].ModelType != "expert" { + t.Fatalf("expected expert model type for pro request, got %q", ds.uploadCalls[0].ModelType) + } refIDs, _ := ds.completionReq["ref_file_ids"].([]any) if len(refIDs) != 1 || refIDs[0] != "file-inline-1" { t.Fatalf("unexpected completion ref_file_ids: %#v", ds.completionReq["ref_file_ids"]) diff --git a/internal/httpapi/openai/files/file_inline_upload.go b/internal/httpapi/openai/files/file_inline_upload.go index a16fe52..bb3ddce 100644 --- a/internal/httpapi/openai/files/file_inline_upload.go +++ b/internal/httpapi/openai/files/file_inline_upload.go @@ -12,6 +12,7 @@ import ( "strings" "ds2api/internal/auth" + "ds2api/internal/config" dsclient "ds2api/internal/deepseek/client" "ds2api/internal/httpapi/openai/shared" "ds2api/internal/promptcompat" @@ -42,6 +43,7 @@ type inlineUploadState struct { ctx context.Context handler *Handler auth *auth.RequestAuth + modelType string uploadedByID map[string]string uploadCount int inlineFileBytes int @@ -58,10 +60,19 @@ func (h *Handler) PreprocessInlineFileInputs(ctx context.Context, a *auth.Reques if h == nil || h.DS == nil || len(req) == 0 { return nil } + modelType := "default" + if requestedModel, ok := req["model"].(string); ok { + if resolvedModel, ok := config.ResolveModel(h.Store, requestedModel); ok { + if resolvedType, ok := config.GetModelType(resolvedModel); ok { + modelType = resolvedType + } + } + } state := &inlineUploadState{ ctx: ctx, handler: h, auth: a, + modelType: modelType, uploadedByID: map[string]string{}, } for _, key := range []string{"messages", "input", "attachments"} { @@ -174,6 +185,7 @@ func (s *inlineUploadState) uploadInlineFile(file inlineDecodedFile) (string, er result, err := s.handler.DS.UploadFile(s.ctx, s.auth, dsclient.UploadFileRequest{ Filename: file.Filename, ContentType: contentType, + ModelType: s.modelType, Data: file.Data, }, 3) if err != nil { diff --git a/internal/httpapi/openai/files/handler_files.go b/internal/httpapi/openai/files/handler_files.go index edfb653..5365409 100644 --- a/internal/httpapi/openai/files/handler_files.go +++ b/internal/httpapi/openai/files/handler_files.go @@ -8,6 +8,7 @@ import ( "ds2api/internal/auth" "ds2api/internal/chathistory" + "ds2api/internal/config" dsclient "ds2api/internal/deepseek/client" "ds2api/internal/httpapi/openai/shared" ) @@ -66,10 +67,12 @@ func (h *Handler) UploadFile(w http.ResponseWriter, r *http.Request) { if contentType == "" && len(data) > 0 { contentType = http.DetectContentType(data) } + modelType := resolveUploadModelType(h.Store, r) result, err := h.DS.UploadFile(r.Context(), a, dsclient.UploadFileRequest{ Filename: header.Filename, ContentType: contentType, Purpose: strings.TrimSpace(r.FormValue("purpose")), + ModelType: 
modelType, Data: data, }, 3) if err != nil { @@ -82,6 +85,32 @@ func (h *Handler) UploadFile(w http.ResponseWriter, r *http.Request) { shared.WriteJSON(w, http.StatusOK, buildOpenAIFileObject(result)) } +func resolveUploadModelType(store shared.ConfigReader, r *http.Request) string { + for _, candidate := range []string{r.FormValue("model_type"), r.Header.Get("X-Model-Type")} { + if modelType := normalizeUploadModelType(candidate); modelType != "" { + return modelType + } + } + requestedModel := strings.TrimSpace(r.FormValue("model")) + if requestedModel != "" { + if resolvedModel, ok := config.ResolveModel(store, requestedModel); ok { + if modelType, ok := config.GetModelType(resolvedModel); ok { + return modelType + } + } + } + return "default" +} + +func normalizeUploadModelType(raw string) string { + switch strings.ToLower(strings.TrimSpace(raw)) { + case "default", "expert", "vision": + return strings.ToLower(strings.TrimSpace(raw)) + default: + return "" + } +} + func buildOpenAIFileObject(result *dsclient.UploadFileResult) map[string]any { if result == nil { obj := map[string]any{ diff --git a/internal/httpapi/openai/files_route_test.go b/internal/httpapi/openai/files_route_test.go index 2b9c205..f365dc3 100644 --- a/internal/httpapi/openai/files_route_test.go +++ b/internal/httpapi/openai/files_route_test.go @@ -77,7 +77,7 @@ func (m *filesRouteDSStub) DeleteAllSessionsForToken(_ context.Context, _ string return nil } -func newMultipartUploadRequest(t *testing.T, purpose string, filename string, data []byte) *http.Request { +func newMultipartUploadRequest(t *testing.T, purpose string, filename string, data []byte, model string) *http.Request { t.Helper() var body bytes.Buffer writer := multipart.NewWriter(&body) @@ -86,6 +86,11 @@ func newMultipartUploadRequest(t *testing.T, purpose string, filename string, da t.Fatalf("write purpose failed: %v", err) } } + if model != "" { + if err := writer.WriteField("model", model); err != nil { + t.Fatalf("write model failed: %v", err) + } + } part, err := writer.CreateFormFile("file", filename) if err != nil { t.Fatalf("create form file failed: %v", err) @@ -108,7 +113,7 @@ func TestFilesRouteUploadSuccess(t *testing.T) { r := chi.NewRouter() registerOpenAITestRoutes(r, h) - req := newMultipartUploadRequest(t, "assistants", "notes.txt", []byte("hello world")) + req := newMultipartUploadRequest(t, "assistants", "notes.txt", []byte("hello world"), "deepseek-v4-vision") rec := httptest.NewRecorder() r.ServeHTTP(rec, req) @@ -121,6 +126,9 @@ func TestFilesRouteUploadSuccess(t *testing.T) { if ds.lastReq.Purpose != "assistants" { t.Fatalf("expected purpose assistants, got %q", ds.lastReq.Purpose) } + if ds.lastReq.ModelType != "vision" { + t.Fatalf("expected vision model type, got %q", ds.lastReq.ModelType) + } if string(ds.lastReq.Data) != "hello world" { t.Fatalf("unexpected uploaded data: %q", string(ds.lastReq.Data)) } @@ -145,7 +153,7 @@ func TestFilesRouteUploadIncludesAccountIDForManagedAccount(t *testing.T) { r := chi.NewRouter() registerOpenAITestRoutes(r, h) - req := newMultipartUploadRequest(t, "assistants", "notes.txt", []byte("hello world")) + req := newMultipartUploadRequest(t, "assistants", "notes.txt", []byte("hello world"), "deepseek-v4-vision") rec := httptest.NewRecorder() r.ServeHTTP(rec, req) diff --git a/internal/httpapi/openai/history/current_input_file.go b/internal/httpapi/openai/history/current_input_file.go index 648331c..1763276 100644 --- a/internal/httpapi/openai/history/current_input_file.go +++ 
b/internal/httpapi/openai/history/current_input_file.go @@ -7,6 +7,7 @@ import ( "strings" "ds2api/internal/auth" + "ds2api/internal/config" dsclient "ds2api/internal/deepseek/client" "ds2api/internal/httpapi/openai/shared" "ds2api/internal/promptcompat" @@ -35,10 +36,15 @@ func (s Service) ApplyCurrentInputFile(ctx context.Context, a *auth.RequestAuth, if strings.TrimSpace(fileText) == "" { return stdReq, errors.New("current user input file produced empty transcript") } + modelType := "default" + if resolvedType, ok := config.GetModelType(stdReq.ResolvedModel); ok { + modelType = resolvedType + } result, err := s.DS.UploadFile(ctx, a, dsclient.UploadFileRequest{ Filename: currentInputFilename, ContentType: currentInputContentType, Purpose: currentInputPurpose, + ModelType: modelType, Data: []byte(fileText), }, 3) if err != nil { diff --git a/internal/httpapi/openai/history_split_test.go b/internal/httpapi/openai/history_split_test.go index d223689..9e5bdd9 100644 --- a/internal/httpapi/openai/history_split_test.go +++ b/internal/httpapi/openai/history_split_test.go @@ -227,7 +227,7 @@ func TestApplyCurrentInputFileDisabledPassThrough(t *testing.T) { DS: ds, } req := map[string]any{ - "model": "deepseek-v4-flash", + "model": "deepseek-v4-vision", "messages": historySplitTestMessages(), } stdReq, err := promptcompat.NormalizeOpenAIChatRequest(h.Store, req, "") @@ -332,7 +332,7 @@ func TestApplyCurrentInputFilePreservesFullContextPromptForTokenCounting(t *test DS: ds, } req := map[string]any{ - "model": "deepseek-v4-flash", + "model": "deepseek-v4-vision", "messages": historySplitTestMessages(), } stdReq, err := promptcompat.NormalizeOpenAIChatRequest(h.Store, req, "") @@ -378,7 +378,7 @@ func TestApplyCurrentInputFileUploadsFullContextFile(t *testing.T) { DS: ds, } req := map[string]any{ - "model": "deepseek-v4-flash", + "model": "deepseek-v4-vision", "messages": historySplitTestMessages(), } stdReq, err := promptcompat.NormalizeOpenAIChatRequest(h.Store, req, "") @@ -400,6 +400,9 @@ func TestApplyCurrentInputFileUploadsFullContextFile(t *testing.T) { if upload.Filename != "DS2API_HISTORY.txt" { t.Fatalf("expected DS2API_HISTORY.txt upload, got %q", upload.Filename) } + if upload.ModelType != "vision" { + t.Fatalf("expected vision model type for vision request, got %q", upload.ModelType) + } uploadedText := string(upload.Data) for _, want := range []string{"# DS2API_HISTORY.txt", "=== 1. SYSTEM ===", "=== 2. USER ===", "=== 3. ASSISTANT ===", "=== 4. TOOL ===", "=== 5. 
USER ===", "system instructions", "first user turn", "hidden reasoning", "tool result", "latest user turn", promptcompat.ThinkingInjectionMarker} { if !strings.Contains(uploadedText, want) { diff --git a/webui/src/features/apiTester/ApiTesterContainer.jsx b/webui/src/features/apiTester/ApiTesterContainer.jsx index dabd049..dce018e 100644 --- a/webui/src/features/apiTester/ApiTesterContainer.jsx +++ b/webui/src/features/apiTester/ApiTesterContainer.jsx @@ -217,6 +217,7 @@ export default function ApiTesterContainer({ config, onMessage, authFetch }) { setSelectedAccount={setSelectedAccount} effectiveKey={effectiveKey} selectedAccount={selectedAccount} + model={model} onMessage={onMessage} response={response} isStreaming={isStreaming} diff --git a/webui/src/features/apiTester/ChatPanel.jsx b/webui/src/features/apiTester/ChatPanel.jsx index 32b160e..e4d1428 100644 --- a/webui/src/features/apiTester/ChatPanel.jsx +++ b/webui/src/features/apiTester/ChatPanel.jsx @@ -13,6 +13,7 @@ export default function ChatPanel({ setSelectedAccount, effectiveKey, selectedAccount, + model, onMessage, response, isStreaming, @@ -37,11 +38,15 @@ export default function ChatPanel({ setUploadingFiles(true) const initialSelectedAccount = String(selectedAccount || '').trim() + const selectedModel = String(model || '').trim() let boundAccount = initialSelectedAccount for (const file of files) { const formData = new FormData() formData.append('file', file) formData.append('purpose', 'assistants') + if (selectedModel) { + formData.append('model', selectedModel) + } const headers = { 'Authorization': `Bearer ${effectiveKey}`, @@ -181,8 +186,9 @@ export default function ChatPanel({ />