refactor: Relocate JavaScript source and Node.js test files to dedicated directories and extract OpenAI stream runtime tool call finalization logic.

This commit is contained in:
CJACK
2026-02-22 22:37:08 +08:00
parent d3b60edb6f
commit d4017b87c1
30 changed files with 201 additions and 192 deletions

View File

@@ -0,0 +1,169 @@
'use strict';
const test = require('node:test');
const assert = require('node:assert/strict');
const handler = require('../../api/chat-stream.js');
const {
createToolSieveState,
processToolSieveChunk,
flushToolSieve,
} = require('../../internal/js/helpers/stream-tool-sieve.js');
const {
parseChunkForContent,
resolveToolcallPolicy,
normalizePreparedToolNames,
boolDefaultTrue,
} = handler.__test;
// Coverage for the policy-resolution and small helper hooks that
// chat-stream exposes via handler.__test.
test('chat-stream exposes parser test hooks', () => {
  assert.equal(typeof parseChunkForContent, 'function');
  assert.equal(typeof resolveToolcallPolicy, 'function');
});
test('resolveToolcallPolicy defaults to feature-match + early emit when prepare flags missing', () => {
  const schemaTools = [
    { type: 'function', function: { name: 'read_file', parameters: { type: 'object' } } },
  ];
  const result = resolveToolcallPolicy({}, schemaTools);
  assert.deepEqual(result.toolNames, ['read_file']);
  assert.equal(result.toolSieveEnabled, true);
  assert.equal(result.emitEarlyToolDeltas, true);
});
test('resolveToolcallPolicy respects prepare flags and prepared tool names', () => {
  // Prepared names win over schema names; blank/null entries are dropped.
  const prepared = {
    tool_names: [' prepped_tool ', '', null],
    toolcall_feature_match: false,
    toolcall_early_emit_high: false,
  };
  const fallbackSchema = [
    { type: 'function', function: { name: 'fallback_tool', parameters: { type: 'object' } } },
  ];
  const result = resolveToolcallPolicy(prepared, fallbackSchema);
  assert.deepEqual(result.toolNames, ['prepped_tool']);
  assert.equal(result.toolSieveEnabled, false);
  assert.equal(result.emitEarlyToolDeltas, false);
});
test('normalizePreparedToolNames filters empty values', () => {
  const rawNames = [' a ', '', null, 'b'];
  assert.deepEqual(normalizePreparedToolNames(rawNames), ['a', 'b']);
});
test('boolDefaultTrue keeps false only when explicitly false', () => {
  assert.equal(boolDefaultTrue(false), false);
  assert.equal(boolDefaultTrue(true), true);
  assert.equal(boolDefaultTrue(undefined), true);
});
test('parseChunkForContent keeps split response/content fragments inside response array', () => {
  const streamChunk = {
    p: 'response',
    v: [
      { p: 'response/content', v: '{"' },
      { p: 'response/content', v: 'tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}' },
    ],
  };
  const result = parseChunkForContent(streamChunk, false, 'text');
  assert.equal(result.finished, false);
  assert.equal(result.newType, 'text');
  assert.equal(result.parts.length, 2);
  // Re-joining the two fragments must reconstruct the full tool-call JSON.
  const joined = result.parts.reduce((acc, part) => acc + part.text, '');
  assert.equal(joined, '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}');
});
test('parseChunkForContent + sieve does not leak suspicious prefix in split tool json case', () => {
  const streamChunk = {
    p: 'response',
    v: [
      { p: 'response/content', v: '{"' },
      { p: 'response/content', v: 'tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}' },
    ],
  };
  const result = parseChunkForContent(streamChunk, false, 'text');
  const sieve = createToolSieveState();
  const collected = [];
  result.parts.forEach((part) => {
    collected.push(...processToolSieveChunk(sieve, part.text, ['read_file']));
  });
  collected.push(...flushToolSieve(sieve, ['read_file']));
  const sawCalls = collected.some((evt) => evt.type === 'tool_calls' && evt.calls && evt.calls.length > 0);
  const sawDeltas = collected.some((evt) => evt.type === 'tool_call_deltas' && evt.deltas && evt.deltas.length > 0);
  let leaked = '';
  for (const evt of collected) {
    if (evt.type === 'text' && evt.text) leaked += evt.text;
  }
  assert.equal(sawCalls || sawDeltas, true);
  // Neither the opening brace nor the tool_calls key may surface as text.
  assert.equal(leaked.includes('{'), false);
  assert.equal(leaked.toLowerCase().includes('tool_calls'), false);
});
test('parseChunkForContent consumes nested item.v array payloads', () => {
  const payload = {
    p: 'response',
    v: [
      { p: 'response/content', v: ['A', 'B'] },
      { p: 'response/content', v: [{ content: 'C', type: 'RESPONSE' }] },
    ],
  };
  const result = parseChunkForContent(payload, false, 'text');
  assert.equal(result.finished, false);
  const joined = result.parts.map((part) => part.text).join('');
  assert.equal(joined, 'ABC');
});
test('parseChunkForContent detects nested status FINISHED in array payload', () => {
  const payload = { p: 'response', v: [{ p: 'status', v: 'FINISHED' }] };
  const result = parseChunkForContent(payload, false, 'text');
  assert.equal(result.finished, true);
  assert.deepEqual(result.parts, []);
});
test('parseChunkForContent ignores items without v to match Go parser behavior', () => {
  // An item carrying only type/content (no v field) must be skipped entirely.
  const payload = { p: 'response', v: [{ type: 'RESPONSE', content: 'no-v-content' }] };
  const result = parseChunkForContent(payload, false, 'text');
  assert.equal(result.finished, false);
  assert.deepEqual(result.parts, []);
});
test('parseChunkForContent handles response/fragments APPEND with thinking and response transitions', () => {
  const payload = {
    p: 'response/fragments',
    o: 'APPEND',
    v: [
      { type: 'THINK', content: '思考中' },
      { type: 'RESPONSE', content: '结论' },
    ],
  };
  // Starts in 'thinking' mode; the RESPONSE fragment flips newType to 'text'.
  const result = parseChunkForContent(payload, true, 'thinking');
  assert.equal(result.finished, false);
  assert.equal(result.newType, 'text');
  assert.deepEqual(result.parts, [
    { text: '思考中', type: 'thinking' },
    { text: '结论', type: 'text' },
  ]);
});
test('parseChunkForContent supports wrapped response.fragments object shape', () => {
  const fragments = [
    { type: 'RESPONSE', content: 'A' },
    { type: 'RESPONSE', content: 'B' },
  ];
  const payload = { p: 'response', v: { response: { fragments } } };
  const result = parseChunkForContent(payload, false, 'text');
  assert.equal(result.finished, false);
  assert.equal(result.parts.map((part) => part.text).join(''), 'AB');
});

View File

@@ -0,0 +1,60 @@
'use strict';
const test = require('node:test');
const assert = require('node:assert/strict');
const fs = require('node:fs');
const path = require('node:path');
const chatStream = require('../../api/chat-stream.js');
const { parseToolCalls } = require('../../internal/js/helpers/stream-tool-sieve.js');
const { parseChunkForContent, estimateTokens } = chatStream.__test;
const compatRoot = path.resolve(__dirname, '../../tests/compat');
/**
 * Read a file from disk as UTF-8 and parse its contents as JSON.
 * @param {string} filePath - path to a JSON file
 * @returns {*} the parsed JSON value
 */
function readJSON(filePath) {
  const raw = fs.readFileSync(filePath, 'utf8');
  return JSON.parse(raw);
}
// Sorted list of *.json fixture files under a directory.
const listJsonFixtures = (dir) =>
  fs.readdirSync(dir).filter((f) => f.endsWith('.json')).sort();
test('js compat: sse fixtures', () => {
  const fixtureDir = path.join(compatRoot, 'fixtures', 'sse_chunks');
  const expectedDir = path.join(compatRoot, 'expected');
  const files = listJsonFixtures(fixtureDir);
  assert.ok(files.length > 0);
  for (const file of files) {
    const name = file.replace(/\.json$/i, '');
    const fixture = readJSON(path.join(fixtureDir, file));
    const expected = readJSON(path.join(expectedDir, `sse_${name}.json`));
    const thinking = Boolean(fixture.thinking_enabled);
    const currentType = fixture.current_type || 'text';
    const got = parseChunkForContent(fixture.chunk, thinking, currentType);
    assert.deepEqual(got.parts, expected.parts, `${name}: parts mismatch`);
    assert.equal(got.finished, expected.finished, `${name}: finished mismatch`);
    assert.equal(got.newType, expected.new_type, `${name}: newType mismatch`);
  }
});
test('js compat: toolcall fixtures', () => {
  const fixtureDir = path.join(compatRoot, 'fixtures', 'toolcalls');
  const expectedDir = path.join(compatRoot, 'expected');
  const files = listJsonFixtures(fixtureDir);
  assert.ok(files.length > 0);
  for (const file of files) {
    const name = file.replace(/\.json$/i, '');
    const fixture = readJSON(path.join(fixtureDir, file));
    const expected = readJSON(path.join(expectedDir, `toolcalls_${name}.json`));
    const got = parseToolCalls(fixture.text, fixture.tool_names || []);
    assert.deepEqual(got, expected.calls, `${name}: calls mismatch`);
  }
});
test('js compat: token fixtures', () => {
  const fixture = readJSON(path.join(compatRoot, 'fixtures', 'token_cases.json'));
  const expected = readJSON(path.join(compatRoot, 'expected', 'token_cases.json'));
  // Index expected token counts by case name for O(1) lookup per case.
  const expectedByName = new Map(expected.cases.map((c) => [c.name, c.tokens]));
  for (const c of fixture.cases) {
    assert.ok(expectedByName.has(c.name), `missing expected case: ${c.name}`);
    assert.equal(estimateTokens(c.text), expectedByName.get(c.name), `${c.name}: tokens mismatch`);
  }
});

View File

@@ -0,0 +1,195 @@
'use strict';
const test = require('node:test');
const assert = require('node:assert/strict');
const {
extractToolNames,
createToolSieveState,
processToolSieveChunk,
flushToolSieve,
parseToolCalls,
parseStandaloneToolCalls,
} = require('../../internal/js/helpers/stream-tool-sieve.js');
// Drive a fresh sieve state machine over a sequence of text chunks and
// return every event it emits, including those produced by the final flush.
function runSieve(chunks, toolNames) {
  const sieve = createToolSieveState();
  const collected = [];
  chunks.forEach((piece) => {
    collected.push(...processToolSieveChunk(sieve, piece, toolNames));
  });
  collected.push(...flushToolSieve(sieve, toolNames));
  return collected;
}
// Concatenate, in order, the text of every non-empty 'text' event.
function collectText(events) {
  let combined = '';
  for (const evt of events) {
    if (evt.type === 'text' && evt.text) {
      combined += evt.text;
    }
  }
  return combined;
}
test('extractToolNames keeps tool mode enabled with unknown fallback', () => {
  // Entries without a usable function.name map to 'unknown' instead of
  // being dropped, so tool mode stays on.
  const schema = [
    { function: { description: 'no name tool' } },
    { function: { name: ' read_file ' } },
    {},
  ];
  assert.deepEqual(extractToolNames(schema), ['unknown', 'read_file', 'unknown']);
});
test('parseToolCalls keeps non-object argument strings as _raw (Go parity)', () => {
  const payload = JSON.stringify({
    tool_calls: [
      { name: 'read_file', input: '123' },
      { name: 'list_dir', input: '[1,2,3]' },
    ],
  });
  const got = parseToolCalls(payload, ['read_file', 'list_dir']);
  assert.deepEqual(got, [
    { name: 'read_file', input: { _raw: '123' } },
    { name: 'list_dir', input: { _raw: '[1,2,3]' } },
  ]);
});
test('parseToolCalls still intercepts unknown schema names to avoid leaks', () => {
  const payload = JSON.stringify({ tool_calls: [{ name: 'not_in_schema', input: { q: 'go' } }] });
  const got = parseToolCalls(payload, ['search']);
  assert.equal(got.length, 1);
  assert.equal(got[0].name, 'not_in_schema');
});
// NOTE(review): the previous description claimed parseToolCalls "supports"
// fenced json, but the assertion expects zero extracted calls — fenced
// code blocks are treated as inert examples, consistent with the
// parseStandaloneToolCalls fenced-block test in this file. The description
// is renamed to state the asserted behavior accurately.
test('parseToolCalls ignores fenced json with function.arguments string payload', () => {
  const text = [
    'I will call a tool now.',
    '```json',
    '{"tool_calls":[{"function":{"name":"read_file","arguments":"{\\"path\\":\\"README.md\\"}"}}]}',
    '```',
  ].join('\n');
  const calls = parseToolCalls(text, ['read_file']);
  assert.equal(calls.length, 0);
});
test('parseStandaloneToolCalls only matches standalone payload and ignores mixed prose', () => {
  const standalone = '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}';
  const mixed = '这里是示例:{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]},请勿执行。';
  // Prose surrounding the JSON must suppress the match entirely.
  assert.equal(parseStandaloneToolCalls(mixed, ['read_file']).length, 0);
  assert.equal(parseStandaloneToolCalls(standalone, ['read_file']).length, 1);
});
test('parseStandaloneToolCalls ignores fenced code block tool_call examples', () => {
  const fenced = ['```json', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}', '```'].join('\n');
  assert.equal(parseStandaloneToolCalls(fenced, ['read_file']).length, 0);
});
test('sieve emits tool_calls and does not leak suspicious prefix on late key convergence', () => {
  const pieces = [
    '{"',
    'tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}',
    '后置正文C。',
  ];
  const collected = runSieve(pieces, ['read_file']);
  const visible = collectText(collected);
  const sawCalls = collected.some((evt) => evt.type === 'tool_calls' && Array.isArray(evt.calls) && evt.calls.length > 0);
  const sawDeltas = collected.some((evt) => evt.type === 'tool_call_deltas' && Array.isArray(evt.deltas) && evt.deltas.length > 0);
  assert.equal(sawCalls || sawDeltas, true);
  assert.equal(visible.includes('{'), false);
  assert.equal(visible.toLowerCase().includes('tool_calls'), false);
  // Trailing prose after the tool JSON must still reach the user.
  assert.equal(visible.includes('后置正文C。'), true);
});
test('sieve keeps embedded invalid tool-like json as normal text to avoid stream stalls', () => {
  // Single-quoted pseudo-JSON never parses; it must flow through as text.
  const pieces = [
    '前置正文D。',
    "{'tool_calls':[{'name':'read_file','input':{'path':'README.MD'}}]}",
    '后置正文E。',
  ];
  const collected = runSieve(pieces, ['read_file']);
  const visible = collectText(collected);
  assert.equal(collected.some((evt) => evt.type === 'tool_calls'), false);
  assert.equal(visible.includes('前置正文D。'), true);
  assert.equal(visible.includes('后置正文E。'), true);
  assert.equal(visible.toLowerCase().includes('tool_calls'), true);
});
test('sieve flushes incomplete captured tool json as text on stream finalize', () => {
  const collected = runSieve(['前置正文F。', '{"tool_calls":[{"name":"read_file"'], ['read_file']);
  const visible = collectText(collected);
  assert.equal(visible.includes('前置正文F。'), true);
  assert.equal(visible.toLowerCase().includes('tool_calls'), true);
  assert.equal(visible.includes('{'), true);
});
test('sieve keeps plain text intact in tool mode when no tool call appears', () => {
  const collected = runSieve(['你好,', '这是普通文本回复。', '请继续。'], ['read_file']);
  assert.equal(collected.some((evt) => evt.type === 'tool_calls'), false);
  assert.equal(collectText(collected), '你好,这是普通文本回复。请继续。');
});
test('sieve emits incremental tool_call_deltas for split arguments payload', () => {
  const sieve = createToolSieveState();
  // Feed the tool-call JSON in two pieces split mid-argument, then flush.
  const collected = [
    ...processToolSieveChunk(sieve, '{"tool_calls":[{"name":"read_file","input":{"path":"READ', ['read_file']),
    ...processToolSieveChunk(sieve, 'ME.MD","mode":"head"}}]}', ['read_file']),
    ...flushToolSieve(sieve, ['read_file']),
  ];
  const deltaEvents = collected.filter((evt) => evt.type === 'tool_call_deltas');
  assert.equal(deltaEvents.length > 0, true);
  const merged = deltaEvents.flatMap((evt) => evt.deltas || []);
  assert.equal(merged.some((d) => d.name === 'read_file'), true);
  // Concatenated argument fragments must reconstruct both JSON keys.
  const argsJoined = merged.map((d) => d.arguments || '').join('');
  assert.equal(argsJoined.includes('"path":"README.MD"'), true);
  assert.equal(argsJoined.includes('"mode":"head"'), true);
});
test('sieve still intercepts tool call after leading plain text without suffix', () => {
  const collected = runSieve(
    ['我将调用工具。', '{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}'],
    ['read_file'],
  );
  const intercepted = collected.some(
    (evt) =>
      (evt.type === 'tool_calls' && evt.calls?.length > 0) ||
      (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0),
  );
  const visible = collectText(collected);
  assert.equal(intercepted, true);
  assert.equal(visible.includes('我将调用工具。'), true);
  assert.equal(visible.toLowerCase().includes('tool_calls'), false);
});
test('sieve intercepts tool call and preserves trailing same-chunk text', () => {
  const collected = runSieve(
    ['{"tool_calls":[{"name":"read_file","input":{"path":"README.MD"}}]}然后继续解释。'],
    ['read_file'],
  );
  const intercepted = collected.some(
    (evt) =>
      (evt.type === 'tool_calls' && evt.calls?.length > 0) ||
      (evt.type === 'tool_call_deltas' && evt.deltas?.length > 0),
  );
  const visible = collectText(collected);
  assert.equal(intercepted, true);
  assert.equal(visible.includes('然后继续解释。'), true);
  assert.equal(visible.toLowerCase().includes('tool_calls'), false);
});