feat: Add support for parsing dictionary-based SSE fragments and prevent duplicate stream termination messages.

This commit is contained in:
CJACK
2026-02-13 22:44:39 +08:00
parent ee0b7f08a0
commit 648b80bb7b
3 changed files with 29 additions and 2 deletions

View File

@@ -32,9 +32,14 @@ logger = logging.getLogger("ds2api")
# -------------------------- Initialize tokenizer --------------------------
chat_tokenizer_dir = resolve_path("DS2API_TOKENIZER_DIR", "")
# Suppress the Mistral tokenizer regex warning (does not affect DeepSeek
# tokenization): temporarily raise the "transformers" logger level while the
# tokenizer loads, then restore the previous level.
_tf_logger = logging.getLogger("transformers")
_tf_log_level = _tf_logger.level
_tf_logger.setLevel(logging.ERROR)
try:
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        chat_tokenizer_dir, trust_remote_code=True
    )
finally:
    # Restore the level even if loading raises, so transformers warnings are
    # not permanently silenced for the rest of the process.
    _tf_logger.setLevel(_tf_log_level)
# ----------------------------------------------------------------------
# 配置文件的读写函数

View File

@@ -255,6 +255,26 @@ def parse_sse_chunk_for_content(
return ([], True, new_fragment_type)
contents.extend(result)
# Handle dict values (the initial response chunk, which contains response.fragments)
elif isinstance(v_value, dict):
response_obj = v_value.get("response", v_value)
fragments = response_obj.get("fragments", [])
if isinstance(fragments, list):
for frag in fragments:
if isinstance(frag, dict):
frag_type = frag.get("type", "").upper()
frag_content = frag.get("content", "")
if frag_type == "THINK" or frag_type == "THINKING":
new_fragment_type = "thinking"
if frag_content:
contents.append((frag_content, "thinking"))
elif frag_type == "RESPONSE":
new_fragment_type = "text"
if frag_content:
contents.append((frag_content, "text"))
elif frag_content:
contents.append((frag_content, ptype))
return (contents, False, new_fragment_type)