Merge pull request #78 from majorcheng/main

fix: complete OpenAI logs and default persisted logs to summary
This commit is contained in:
Xu Kang
2026-03-20 14:29:27 +08:00
committed by GitHub
12 changed files with 1096 additions and 15 deletions

View File

@@ -80,6 +80,7 @@ cp config.yaml.example config.yaml
| `logging.file_enabled` | 日志文件持久化 | `false` |
| `logging.dir` | 日志存储目录 | `./logs` |
| `logging.max_days` | 日志保留天数 | `7` |
| `logging.persist_mode` | 日志落盘模式:`summary` 问答摘要 / `compact` 精简 / `full` 完整 | `summary` |
| `max_auto_continue` | 截断自动续写次数 (`0`=禁用,交由客户端续写) | `0` |
| `sanitize_response` | 响应内容清洗开关(替换 Cursor 身份引用为 Claude) | `false` |
| `refusal_patterns` | 自定义拒绝检测规则列表(追加到内置规则) | 不配置 |
@@ -137,6 +138,8 @@ OPENAI_BASE_URL=http://localhost:3010/v1
- **阶段耗时** - 可视化时间线展示各阶段耗时(receive → convert → send → response → complete)
- **🌙 日/夜主题** - 一键切换明暗主题,自动记忆偏好
- **日志持久化** - 配置 `logging.file_enabled: true` 后日志写入 JSONL 文件,重启自动加载
- **摘要落盘(默认)** - `logging.persist_mode: summary` 仅保留“用户问题 + 模型回答”与少量元数据,体积最小
- **精简落盘** - `logging.persist_mode: compact` 保留更多排障字段,同时压缩磁盘 JSONL
### 鉴权

View File

@@ -169,7 +169,7 @@ vision:
# ==================== 日志持久化配置(可选) ====================
# 开启后日志会写入文件,重启后自动加载历史记录
# 环境变量: LOG_FILE_ENABLED=true|false, LOG_DIR=./logs
# 环境变量: LOG_FILE_ENABLED=true|false, LOG_DIR=./logs, LOG_PERSIST_MODE=compact|full|summary
logging:
# 是否启用日志文件持久化(默认关闭)
file_enabled: false
@@ -177,3 +177,8 @@ logging:
dir: "./logs"
# 日志保留天数(超过天数的日志文件会自动清理)
max_days: 7
# 落盘模式:
# compact = 精简调试信息(保留更多排障细节)
# full = 完整持久化
# summary = 仅保留“用户问了什么 / 模型答了什么”与少量元数据(默认)
persist_mode: summary

View File

@@ -262,6 +262,10 @@ function renderPromptsTab(tc){
}
// ===== 原始请求 =====
h+='<div class="content-section"><div class="cs-title">📥 客户端原始请求</div></div>';
if(curPayload.question){
h+='<div class="content-section"><div class="cs-title">❓ 用户问题摘要 <span class="cnt">'+fmtN(curPayload.question.length)+' chars</span></div>';
h+='<div class="resp-box" style="max-height:300px;overflow-y:auto;border-color:var(--orange)">'+escH(curPayload.question)+'<button class="copy-btn" onclick="copyText(curPayload.question)">复制</button></div></div>';
}
if(curPayload.systemPrompt){
h+='<div class="content-section"><div class="cs-title">🔒 原始 System Prompt <span class="cnt">'+fmtN(curPayload.systemPrompt.length)+' chars</span></div>';
h+='<div class="resp-box" style="max-height:400px;overflow-y:auto;border-color:var(--orange)">'+escH(curPayload.systemPrompt)+'<button class="copy-btn" onclick="copyText(curPayload.systemPrompt)">复制</button></div></div>';
@@ -294,6 +298,15 @@ function renderPromptsTab(tc){
function renderResponseTab(tc){
if(!curPayload){tc.innerHTML='<div class="empty"><div class="ic">📤</div><p>暂无响应数据</p></div>';return}
let h='';
if(curPayload.answer){
const title=curPayload.answerType==='tool_calls'?'✅ 最终结果(工具调用摘要)':'✅ 最终回答摘要';
h+='<div class="content-section"><div class="cs-title">'+title+' <span class="cnt">'+fmtN(curPayload.answer.length)+' chars</span></div>';
h+='<div class="resp-box diff" style="max-height:320px">'+escH(curPayload.answer)+'<button class="copy-btn" onclick="copyText(curPayload.answer)">复制</button></div></div>';
}
if(curPayload.toolCallNames&&curPayload.toolCallNames.length&&!curPayload.toolCalls){
h+='<div class="content-section"><div class="cs-title">🔧 工具调用名称 <span class="cnt">'+curPayload.toolCallNames.length+' 个</span></div>';
h+='<div class="resp-box">'+escH(curPayload.toolCallNames.join(', '))+'<button class="copy-btn" onclick="copyText(curPayload.toolCallNames.join(\', \'))">复制</button></div></div>';
}
if(curPayload.thinkingContent){
h+='<div class="content-section"><div class="cs-title">🧠 Thinking 内容 <span class="cnt">'+fmtN(curPayload.thinkingContent.length)+' chars</span></div>';
h+='<div class="resp-box" style="border-color:var(--purple);max-height:300px">'+escH(curPayload.thinkingContent)+'<button class="copy-btn" onclick="copyText(curPayload.thinkingContent)">复制</button></div></div>';

View File

@@ -74,10 +74,12 @@ function parseYamlConfig(defaults: AppConfig): { config: AppConfig; raw: Record<
}
// ★ 日志文件持久化
if (yaml.logging !== undefined) {
const persistModes = ['compact', 'full', 'summary'];
result.logging = {
file_enabled: yaml.logging.file_enabled === true, // 默认关闭
dir: yaml.logging.dir || './logs',
max_days: typeof yaml.logging.max_days === 'number' ? yaml.logging.max_days : 7,
persist_mode: persistModes.includes(yaml.logging.persist_mode) ? yaml.logging.persist_mode : 'summary',
};
}
// ★ 工具处理配置
@@ -139,13 +141,21 @@ function applyEnvOverrides(cfg: AppConfig): void {
}
// Logging 环境变量覆盖
if (process.env.LOG_FILE_ENABLED !== undefined) {
if (!cfg.logging) cfg.logging = { file_enabled: false, dir: './logs', max_days: 7 };
if (!cfg.logging) cfg.logging = { file_enabled: false, dir: './logs', max_days: 7, persist_mode: 'summary' };
cfg.logging.file_enabled = process.env.LOG_FILE_ENABLED === 'true' || process.env.LOG_FILE_ENABLED === '1';
}
if (process.env.LOG_DIR) {
if (!cfg.logging) cfg.logging = { file_enabled: false, dir: './logs', max_days: 7 };
if (!cfg.logging) cfg.logging = { file_enabled: false, dir: './logs', max_days: 7, persist_mode: 'summary' };
cfg.logging.dir = process.env.LOG_DIR;
}
if (process.env.LOG_PERSIST_MODE) {
if (!cfg.logging) cfg.logging = { file_enabled: false, dir: './logs', max_days: 7, persist_mode: 'summary' };
cfg.logging.persist_mode = process.env.LOG_PERSIST_MODE === 'full'
? 'full'
: process.env.LOG_PERSIST_MODE === 'summary'
? 'summary'
: 'compact';
}
// 工具透传模式环境变量覆盖
if (process.env.TOOLS_PASSTHROUGH !== undefined) {
if (!cfg.tools) cfg.tools = { schemaMode: 'full', descriptionMaxLength: 0 };

View File

@@ -153,7 +153,9 @@ loadLogsFromFiles();
app.listen(config.port, () => {
const auth = config.authTokens?.length ? `${config.authTokens.length} token(s)` : 'open';
const logPersist = config.logging?.file_enabled ? `file → ${config.logging.dir}` : 'memory only';
const logPersist = config.logging?.file_enabled
? `file(${config.logging.persist_mode || 'summary'}) → ${config.logging.dir}`
: 'memory only';
// Tools 配置摘要
const toolsCfg = config.tools;

View File

@@ -80,6 +80,14 @@ export interface RequestPayload {
retryResponses?: Array<{ attempt: number; response: string; reason: string }>;
/** 每次续写的原始响应 */
continuationResponses?: Array<{ index: number; response: string; dedupedLength: number }>;
/** summary 模式:最后一个用户问题 */
question?: string;
/** summary 模式:最终回答摘要 */
answer?: string;
/** summary 模式:回答类型 */
answerType?: 'text' | 'tool_calls' | 'empty';
/** summary 模式:工具调用名称列表 */
toolCallNames?: string[];
}
export interface RequestSummary {
@@ -133,12 +141,31 @@ function shortId(): string {
// ==================== Log file persistence ====================

// Fallback mode when config omits persist_mode or sets an unknown value.
const DEFAULT_PERSIST_MODE: 'compact' | 'full' | 'summary' = 'summary';
// Per-field character budgets for compact-mode middle truncation.
const DISK_SYSTEM_PROMPT_CHARS = 2000;
const DISK_MESSAGE_PREVIEW_CHARS = 3000;
const DISK_CURSOR_MESSAGE_PREVIEW_CHARS = 2000;
const DISK_RESPONSE_CHARS = 8000;
const DISK_THINKING_CHARS = 4000;
const DISK_TOOL_DESC_CHARS = 500;
const DISK_RETRY_CHARS = 2000;
const DISK_TOOLCALL_STRING_CHARS = 1200;
// Structural limits applied when recursively compacting tool-call values.
const DISK_MAX_ARRAY_ITEMS = 20;
const DISK_MAX_OBJECT_DEPTH = 5;
// Budgets for summary mode (question / answer excerpts).
const DISK_SUMMARY_QUESTION_CHARS = 2000;
const DISK_SUMMARY_ANSWER_CHARS = 4000;
/** Resolve the log persistence directory, or null when file logging is disabled. */
function getLogDir(): string | null {
  const logging = getConfig().logging;
  if (logging?.file_enabled) {
    return logging.dir || './logs';
  }
  return null;
}
/** Read the configured persist mode, falling back to DEFAULT_PERSIST_MODE for unknown values. */
function getPersistMode(): 'compact' | 'full' | 'summary' {
  const mode = getConfig().logging?.persist_mode;
  switch (mode) {
    case 'full':
    case 'summary':
    case 'compact':
      return mode;
    default:
      return DEFAULT_PERSIST_MODE;
  }
}
function getLogFilePath(): string | null {
const dir = getLogDir();
if (!dir) return null;
@@ -153,13 +180,256 @@ function ensureLogDir(): void {
}
}
/**
 * Middle-truncate `text` to roughly `maxChars` characters, inserting an
 * omission marker between the kept head (~70%) and tail (~30%).
 * Returns the input unchanged when it already fits (or is empty).
 * Note: when maxChars is smaller than the marker itself, a minimum
 * head+tail budget of 16 chars is still kept, so output may exceed maxChars.
 */
function truncateMiddle(text: string, maxChars: number): string {
  if (!text || text.length <= maxChars) return text;
  const omitted = text.length - maxChars;
  const marker = `\n...[截断 ${omitted} chars]...\n`;
  const budget = Math.max(16, maxChars - marker.length);
  const headLen = Math.ceil(budget * 0.7);
  const tailLen = Math.max(8, budget - headLen);
  const head = text.slice(0, headLen);
  const tail = text.slice(text.length - tailLen);
  return head + marker + tail;
}
/**
 * Recursively shrink an arbitrary JSON-ish value for disk persistence:
 *  - strings are middle-truncated to `maxStringChars`
 *  - arrays keep at most DISK_MAX_ARRAY_ITEMS entries plus an ellipsis note
 *  - objects are compacted per-key; long-text-looking keys keep the full
 *    budget, all other keys are capped at 400 chars
 *  - anything deeper than DISK_MAX_OBJECT_DEPTH collapses to a placeholder
 * NOTE(review): non-plain objects (Date, Map, Set) are enumerated via
 * Object.entries and will mostly collapse to `{}` — acceptable for logs.
 */
function compactUnknownValue(value: unknown, maxStringChars = DISK_TOOLCALL_STRING_CHARS, depth = 0): unknown {
  if (value === null || value === undefined) return value;
  if (typeof value === 'string') return truncateMiddle(value, maxStringChars);
  if (typeof value === 'number' || typeof value === 'boolean' || typeof value === 'bigint') return value;
  // Depth guard: stop expanding and emit a size hint instead.
  if (depth >= DISK_MAX_OBJECT_DEPTH) {
    if (Array.isArray(value)) return `[array(${value.length})]`;
    return '[object]';
  }
  if (Array.isArray(value)) {
    const items = value.slice(0, DISK_MAX_ARRAY_ITEMS)
      .map(item => compactUnknownValue(item, maxStringChars, depth + 1));
    if (value.length > DISK_MAX_ARRAY_ITEMS) {
      items.push(`[... ${value.length - DISK_MAX_ARRAY_ITEMS} more items]`);
    }
    return items;
  }
  if (typeof value === 'object') {
    const result: Record<string, unknown> = {};
    for (const [key, entry] of Object.entries(value as Record<string, unknown>)) {
      // Keys that usually hold long text keep the caller's budget; others get ≤400.
      const limit = /content|text|arguments|description|prompt|response|reasoning/i.test(key)
        ? maxStringChars
        : Math.min(maxStringChars, 400);
      result[key] = compactUnknownValue(entry, limit, depth + 1);
    }
    return result;
  }
  // Remaining types (function, symbol, …) are stringified.
  return String(value);
}
/**
 * Best-effort plain-text extraction from message content of unknown shape.
 * Strings pass through; arrays are extracted element-wise and joined with
 * newlines (empty pieces dropped); objects are probed for text/output/content
 * string fields, then recursed into content/input. Anything else yields ''.
 */
function extractTextParts(value: unknown): string {
  if (typeof value === 'string') return value;
  if (!value) return '';
  if (Array.isArray(value)) {
    const collected: string[] = [];
    for (const entry of value) {
      const piece = extractTextParts(entry);
      if (piece) collected.push(piece);
    }
    return collected.join('\n');
  }
  if (typeof value === 'object') {
    const record = value as Record<string, unknown>;
    if (typeof record.text === 'string') return record.text;
    if (typeof record.output === 'string') return record.output;
    if (typeof record.content === 'string') return record.content;
    if (record.content !== undefined) return extractTextParts(record.content);
    if (record.input !== undefined) return extractTextParts(record.input);
  }
  return '';
}
/**
 * Extract the most recent user question for summary-mode persistence.
 * Resolution order:
 *  1. last `user` message preview the logger already captured
 *  2. last `user` entry in originalRequest.messages (Chat Completions shape)
 *  3. originalRequest.input as a string or array (Responses API shape)
 *  4. fall back to summary.title
 * All results are middle-truncated to DISK_SUMMARY_QUESTION_CHARS.
 * Returns undefined when no candidate text exists.
 */
function extractLastUserQuestion(summary: RequestSummary, payload: RequestPayload): string | undefined {
  // 1. Prefer the preview already extracted into payload.messages.
  const lastUser = payload.messages?.slice().reverse().find(m => m.role === 'user' && m.contentPreview?.trim());
  if (lastUser?.contentPreview) {
    return truncateMiddle(lastUser.contentPreview, DISK_SUMMARY_QUESTION_CHARS);
  }
  // Only plain objects are searchable; arrays/primitives are ignored.
  const original = payload.originalRequest && typeof payload.originalRequest === 'object' && !Array.isArray(payload.originalRequest)
    ? payload.originalRequest as Record<string, unknown>
    : undefined;
  if (!original) {
    return summary.title ? truncateMiddle(summary.title, DISK_SUMMARY_QUESTION_CHARS) : undefined;
  }
  // 2. Chat Completions style: scan messages backwards for the last user turn.
  if (Array.isArray(original.messages)) {
    for (let i = original.messages.length - 1; i >= 0; i--) {
      const item = original.messages[i] as Record<string, unknown>;
      if (item?.role === 'user') {
        const text = extractTextParts(item.content);
        if (text.trim()) return truncateMiddle(text, DISK_SUMMARY_QUESTION_CHARS);
      }
    }
  }
  // 3. Responses API style: `input` may be a bare string…
  if (typeof original.input === 'string' && original.input.trim()) {
    return truncateMiddle(original.input, DISK_SUMMARY_QUESTION_CHARS);
  }
  // …or an array of items; entries without a role default to user input.
  if (Array.isArray(original.input)) {
    for (let i = original.input.length - 1; i >= 0; i--) {
      const item = original.input[i] as Record<string, unknown>;
      if (!item) continue;
      const role = typeof item.role === 'string' ? item.role : 'user';
      if (role === 'user') {
        const text = extractTextParts(item.content ?? item.input ?? item);
        if (text.trim()) return truncateMiddle(text, DISK_SUMMARY_QUESTION_CHARS);
      }
    }
  }
  // 4. Last resort: the request title, if any.
  return summary.title ? truncateMiddle(summary.title, DISK_SUMMARY_QUESTION_CHARS) : undefined;
}
/**
 * Collect tool names from payload.toolCalls, accepting both the flat
 * `{ name }` shape and the OpenAI `{ function: { name } }` shape.
 * Entries without a non-empty string name are skipped.
 */
function extractToolCallNames(payload: RequestPayload): string[] {
  const calls = payload.toolCalls;
  if (!calls?.length) return [];
  const names: string[] = [];
  for (const call of calls) {
    if (!call || typeof call !== 'object') continue;
    const record = call as Record<string, unknown>;
    if (typeof record.name === 'string') {
      // Flat shape wins; an empty-string name is dropped (matches filter(Boolean)).
      if (record.name) names.push(record.name);
      continue;
    }
    const fn = record.function;
    if (fn && typeof fn === 'object') {
      const fnName = (fn as Record<string, unknown>).name;
      if (typeof fnName === 'string' && fnName) names.push(fnName);
    }
  }
  return names;
}
/**
 * Build the minimal "question + answer" payload persisted in `summary` mode.
 * Keeps only: the last user question, a truncated answer (or a tool-call
 * marker when no text was produced), the answer type, and tool-call names.
 */
function buildSummaryPayload(summary: RequestSummary, payload: RequestPayload): RequestPayload {
  const question = extractLastUserQuestion(summary, payload);
  // Prefer the cleaned final response; fall back to the raw model output.
  const answerText = payload.finalResponse || payload.rawResponse || '';
  const toolCallNames = extractToolCallNames(payload);
  const answer = answerText
    ? truncateMiddle(answerText, DISK_SUMMARY_ANSWER_CHARS)
    : toolCallNames.length > 0
      ? `[tool_calls] ${toolCallNames.join(', ')}`
      : undefined;
  // NOTE: when both text and tool calls exist, answerType is 'text';
  // the tool calls are still surfaced via toolCallNames.
  return {
    ...(question ? { question } : {}),
    ...(answer ? { answer } : {}),
    answerType: answerText ? 'text' : toolCallNames.length > 0 ? 'tool_calls' : 'empty',
    ...(toolCallNames.length > 0 ? { toolCallNames } : {}),
  };
}
/**
 * Reduce the original client request to compact metadata for disk.
 * Always records model/stream/apiFormat/messageCount/toolCount from the
 * summary; optionally adds title, a truncated system prompt preview, an
 * images flag, a short preview of the last user message, scalar sampling
 * parameters, and truncated instructions/system text.
 */
function buildCompactOriginalRequest(summary: RequestSummary, payload: RequestPayload): Record<string, unknown> | undefined {
  // Only plain objects are inspected for extra fields.
  const original = payload.originalRequest && typeof payload.originalRequest === 'object' && !Array.isArray(payload.originalRequest)
    ? payload.originalRequest as Record<string, unknown>
    : undefined;
  const result: Record<string, unknown> = {
    model: summary.model,
    stream: summary.stream,
    apiFormat: summary.apiFormat,
    messageCount: summary.messageCount,
    toolCount: summary.toolCount,
  };
  if (summary.title) result.title = summary.title;
  if (payload.systemPrompt) result.systemPromptPreview = truncateMiddle(payload.systemPrompt, DISK_SYSTEM_PROMPT_CHARS);
  if (payload.messages?.some(m => m.hasImages)) result.hasImages = true;
  const lastUser = payload.messages?.slice().reverse().find(m => m.role === 'user');
  if (lastUser?.contentPreview) {
    result.lastUserPreview = truncateMiddle(lastUser.contentPreview, 800);
  }
  if (original) {
    // Copy scalar sampling parameters only; object-valued entries are skipped.
    for (const key of ['temperature', 'top_p', 'max_tokens', 'max_completion_tokens', 'max_output_tokens']) {
      const value = original[key];
      if (value !== undefined && typeof value !== 'object') result[key] = value;
    }
    if (typeof original.instructions === 'string') {
      result.instructions = truncateMiddle(original.instructions, 1200);
    }
    if (typeof original.system === 'string') {
      result.system = truncateMiddle(original.system, DISK_SYSTEM_PROMPT_CHARS);
    }
  }
  // `result` always carries the five summary fields, so this guard can never
  // actually return undefined; kept as a defensive no-op.
  return Object.keys(result).length > 0 ? result : undefined;
}
/**
 * Build the `compact` persistence payload: keep every debugging-relevant
 * field but middle-truncate long text and replace the original request with
 * compact metadata. The in-memory payload is left untouched; this returns a
 * brand-new object destined for the JSONL file.
 */
function compactPayloadForDisk(summary: RequestSummary, payload: RequestPayload): RequestPayload {
  const compact: RequestPayload = {};
  if (payload.originalRequest !== undefined) {
    // Swap the potentially huge original request for compact metadata.
    compact.originalRequest = buildCompactOriginalRequest(summary, payload);
  }
  if (payload.systemPrompt) {
    compact.systemPrompt = truncateMiddle(payload.systemPrompt, DISK_SYSTEM_PROMPT_CHARS);
  }
  if (payload.messages?.length) {
    compact.messages = payload.messages.map(msg => ({
      ...msg,
      contentPreview: truncateMiddle(msg.contentPreview, DISK_MESSAGE_PREVIEW_CHARS),
    }));
  }
  if (payload.tools?.length) {
    // Keep only the tool name plus a truncated description; schemas are dropped.
    compact.tools = payload.tools.map(tool => ({
      name: tool.name,
      ...(tool.description ? { description: truncateMiddle(tool.description, DISK_TOOL_DESC_CHARS) } : {}),
    }));
  }
  if (payload.cursorRequest !== undefined) {
    // Passed through unchanged — its size is not limited here.
    compact.cursorRequest = payload.cursorRequest;
  }
  if (payload.cursorMessages?.length) {
    compact.cursorMessages = payload.cursorMessages.map(msg => ({
      ...msg,
      contentPreview: truncateMiddle(msg.contentPreview, DISK_CURSOR_MESSAGE_PREVIEW_CHARS),
    }));
  }
  const compactFinalResponse = payload.finalResponse
    ? truncateMiddle(payload.finalResponse, DISK_RESPONSE_CHARS)
    : undefined;
  const compactRawResponse = payload.rawResponse
    ? truncateMiddle(payload.rawResponse, DISK_RESPONSE_CHARS)
    : undefined;
  if (compactFinalResponse) compact.finalResponse = compactFinalResponse;
  // Skip rawResponse when it truncates to the same text as finalResponse,
  // so identical content is not stored twice.
  if (compactRawResponse && compactRawResponse !== compactFinalResponse) {
    compact.rawResponse = compactRawResponse;
  }
  if (payload.thinkingContent) {
    compact.thinkingContent = truncateMiddle(payload.thinkingContent, DISK_THINKING_CHARS);
  }
  if (payload.toolCalls?.length) {
    // Recursively shrink tool-call structures (strings, arrays, nested objects).
    compact.toolCalls = compactUnknownValue(payload.toolCalls) as unknown[];
  }
  if (payload.retryResponses?.length) {
    compact.retryResponses = payload.retryResponses.map(item => ({
      ...item,
      response: truncateMiddle(item.response, DISK_RETRY_CHARS),
      reason: truncateMiddle(item.reason, 300),
    }));
  }
  if (payload.continuationResponses?.length) {
    compact.continuationResponses = payload.continuationResponses.map(item => ({
      ...item,
      response: truncateMiddle(item.response, DISK_RETRY_CHARS),
    }));
  }
  return compact;
}
/** 将已完成的请求写入日志文件 */
function persistRequest(summary: RequestSummary, payload: RequestPayload): void {
const filepath = getLogFilePath();
if (!filepath) return;
try {
ensureLogDir();
const record = { timestamp: Date.now(), summary, payload };
const persistMode = getPersistMode();
const persistedPayload = persistMode === 'full'
? payload
: persistMode === 'summary'
? buildSummaryPayload(summary, payload)
: compactPayloadForDisk(summary, payload);
const record = { timestamp: Date.now(), summary, payload: persistedPayload };
appendFileSync(filepath, JSON.stringify(record) + '\n', 'utf-8');
} catch (e) {
console.warn('[Logger] 写入日志文件失败:', e);

View File

@@ -27,7 +27,7 @@ import type {
import { convertToCursorRequest, parseToolCalls, hasToolCalls } from './converter.js';
import { sendCursorRequest, sendCursorRequestFull } from './cursor-client.js';
import { getConfig } from './config.js';
import { createRequestLogger } from './logger.js';
import { createRequestLogger, type RequestLogger } from './logger.js';
import { createIncrementalTextStreamer, hasLeadingThinking, splitLeadingThinkingBlocks, stripThinkingTags } from './streaming-text.js';
import {
autoContinueCursorToolResponseFull,
@@ -488,11 +488,12 @@ export async function handleOpenAIChatCompletions(req: Request, res: Response):
// Step 2: Anthropic → Cursor 格式(复用现有管道)
const cursorReq = await convertToCursorRequest(anthropicReq);
log.recordCursorRequest(cursorReq);
if (body.stream) {
await handleOpenAIStream(res, cursorReq, body, anthropicReq);
await handleOpenAIStream(res, cursorReq, body, anthropicReq, log);
} else {
await handleOpenAINonStream(res, cursorReq, body, anthropicReq);
await handleOpenAINonStream(res, cursorReq, body, anthropicReq, log);
}
} catch (err: unknown) {
const message = err instanceof Error ? err.message : String(err);
@@ -609,6 +610,7 @@ async function handleOpenAIIncrementalTextStream(
body: OpenAIChatRequest,
anthropicReq: AnthropicRequest,
streamMeta: { id: string; created: number; model: string },
log: RequestLogger,
): Promise<void> {
let activeCursorReq = cursorReq;
let retryCount = 0;
@@ -739,6 +741,16 @@ async function handleOpenAIIncrementalTextStream(
usage: buildOpenAIUsage(anthropicReq, streamer.hasSentText() ? (finalVisibleText || finalRawResponse) : finalTextToSend),
});
log.recordRawResponse(finalRawResponse);
if (finalReasoningContent) {
log.recordThinking(finalReasoningContent);
}
const finalRecordedResponse = streamer.hasSentText()
? sanitizeResponse(finalVisibleText || finalRawResponse)
: finalTextToSend;
log.recordFinalResponse(finalRecordedResponse);
log.complete(finalRecordedResponse.length, 'stop');
res.write('data: [DONE]\n\n');
res.end();
}
@@ -750,6 +762,7 @@ async function handleOpenAIStream(
cursorReq: CursorChatRequest,
body: OpenAIChatRequest,
anthropicReq: AnthropicRequest,
log: RequestLogger,
): Promise<void> {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
@@ -790,7 +803,7 @@ async function handleOpenAIStream(
try {
if (!hasTools && (!body.response_format || body.response_format.type === 'text')) {
await handleOpenAIIncrementalTextStream(res, cursorReq, body, anthropicReq, { id, created, model });
await handleOpenAIIncrementalTextStream(res, cursorReq, body, anthropicReq, { id, created, model }, log);
return;
}
@@ -973,6 +986,8 @@ async function handleOpenAIStream(
if (toolCalls.length > 0) {
finishReason = 'tool_calls';
log.recordToolCalls(toolCalls);
log.updateSummary({ toolCallsDetected: toolCalls.length });
// 发送工具调用前的残余文本 — 如果混合流式已发送则跳过
if (!hybridTextSent) {
@@ -1083,10 +1098,18 @@ async function handleOpenAIStream(
usage: buildOpenAIUsage(anthropicReq, fullResponse),
});
log.recordRawResponse(fullResponse);
if (reasoningContent) {
log.recordThinking(reasoningContent);
}
log.recordFinalResponse(fullResponse);
log.complete(fullResponse.length, finishReason);
res.write('data: [DONE]\n\n');
} catch (err: unknown) {
const message = err instanceof Error ? err.message : String(err);
log.fail(message);
writeOpenAISSE(res, {
id, object: 'chat.completion.chunk', created, model,
choices: [{
@@ -1108,6 +1131,7 @@ async function handleOpenAINonStream(
cursorReq: CursorChatRequest,
body: OpenAIChatRequest,
anthropicReq: AnthropicRequest,
log: RequestLogger,
): Promise<void> {
let activeCursorReq = cursorReq;
let fullText = await sendCursorRequestFull(activeCursorReq);
@@ -1172,6 +1196,8 @@ async function handleOpenAINonStream(
if (parsed.toolCalls.length > 0) {
finishReason = 'tool_calls';
log.recordToolCalls(parsed.toolCalls);
log.updateSummary({ toolCallsDetected: parsed.toolCalls.length });
// 清洗拒绝文本
let cleanText = parsed.cleanText;
if (isRefusal(cleanText)) {
@@ -1224,6 +1250,13 @@ async function handleOpenAINonStream(
};
res.json(response);
log.recordRawResponse(fullText);
if (reasoningContent) {
log.recordThinking(reasoningContent);
}
log.recordFinalResponse(fullText);
log.complete(fullText.length, finishReason);
}
// ==================== 工具函数 ====================
@@ -1300,17 +1333,39 @@ function buildResponseObject(
* 而非 data: {"object":"chat.completion.chunk",...} 格式
*/
export async function handleOpenAIResponses(req: Request, res: Response): Promise<void> {
try {
const body = req.body;
const isStream = (body.stream as boolean) ?? true;
const body = req.body as Record<string, unknown>;
const isStream = (body.stream as boolean) ?? true;
const chatBody = responsesToChatCompletions(body);
const log = createRequestLogger({
method: req.method,
path: req.path,
model: chatBody.model,
stream: isStream,
hasTools: (chatBody.tools?.length ?? 0) > 0,
toolCount: chatBody.tools?.length ?? 0,
messageCount: chatBody.messages?.length ?? 0,
apiFormat: 'responses',
});
log.startPhase('receive', '接收请求');
log.recordOriginalRequest(body);
log.info('OpenAI', 'receive', '收到 OpenAI Responses 请求', {
model: chatBody.model,
stream: isStream,
toolCount: chatBody.tools?.length ?? 0,
messageCount: chatBody.messages?.length ?? 0,
});
try {
// Step 1: 转换请求格式 Responses → Chat Completions → Anthropic → Cursor
const chatBody = responsesToChatCompletions(body);
log.startPhase('convert', '格式转换 (ResponsesChat→Anthropic)');
const anthropicReq = convertToAnthropicRequest(chatBody);
const cursorReq = await convertToCursorRequest(anthropicReq);
log.endPhase();
log.recordCursorRequest(cursorReq);
// 身份探针拦截
if (isIdentityProbe(anthropicReq)) {
log.intercepted('身份探针拦截 (Responses)');
const mockText = "I am Claude, an advanced AI programming assistant created by Anthropic. I am ready to help you write code, debug, and answer your technical questions.";
if (isStream) {
return handleResponsesStreamMock(res, body, mockText);
@@ -1320,12 +1375,13 @@ export async function handleOpenAIResponses(req: Request, res: Response): Promis
}
if (isStream) {
await handleResponsesStream(res, cursorReq, body, anthropicReq);
await handleResponsesStream(res, cursorReq, body, anthropicReq, log);
} else {
await handleResponsesNonStream(res, cursorReq, body, anthropicReq);
await handleResponsesNonStream(res, cursorReq, body, anthropicReq, log);
}
} catch (err: unknown) {
const message = err instanceof Error ? err.message : String(err);
log.fail(message);
console.error(`[OpenAI] /v1/responses 处理失败:`, message);
const status = err instanceof OpenAIRequestError ? err.status : 500;
const type = err instanceof OpenAIRequestError ? err.type : 'server_error';
@@ -1471,6 +1527,7 @@ async function handleResponsesStream(
cursorReq: CursorChatRequest,
body: Record<string, unknown>,
anthropicReq: AnthropicRequest,
log: RequestLogger,
): Promise<void> {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
@@ -1482,6 +1539,7 @@ async function handleResponsesStream(
const respId = responsesId();
const model = (body.model as string) || 'gpt-4';
const hasTools = (anthropicReq.tools?.length ?? 0) > 0;
let toolCallsDetected = 0;
// 缓冲完整响应再处理(复用 Chat Completions 的逻辑)
let fullResponse = '';
@@ -1557,6 +1615,9 @@ async function handleResponsesStream(
const { toolCalls, cleanText } = parseToolCalls(fullResponse);
if (toolCalls.length > 0) {
toolCallsDetected = toolCalls.length;
log.recordToolCalls(toolCalls);
log.updateSummary({ toolCallsDetected: toolCalls.length });
// 1. response.created + response.in_progress
writeResponsesSSE(res, 'response.created', buildResponseObject(respId, model, 'in_progress', []));
writeResponsesSSE(res, 'response.in_progress', buildResponseObject(respId, model, 'in_progress', []));
@@ -1658,8 +1719,12 @@ async function handleResponsesStream(
const msgItemId = responsesItemId();
emitResponsesTextStream(res, respId, msgItemId, model, fullResponse, 0, usage);
}
log.recordRawResponse(fullResponse);
log.recordFinalResponse(fullResponse);
log.complete(fullResponse.length, toolCallsDetected > 0 ? 'tool_calls' : 'stop');
} catch (err: unknown) {
const message = err instanceof Error ? err.message : String(err);
log.fail(message);
// 尝试发送错误后的 response.completed确保 Codex 不会等待超时
try {
const errorText = `[Error: ${message}]`;
@@ -1707,6 +1772,7 @@ async function handleResponsesNonStream(
cursorReq: CursorChatRequest,
body: Record<string, unknown>,
anthropicReq: AnthropicRequest,
log: RequestLogger,
): Promise<void> {
let activeCursorReq = cursorReq;
let fullText = await sendCursorRequestFull(activeCursorReq);
@@ -1752,9 +1818,13 @@ async function handleResponsesNonStream(
const usage = { input_tokens: inputTokens, output_tokens: outputTokens, total_tokens: inputTokens + outputTokens };
const output: Record<string, unknown>[] = [];
let toolCallsDetected = 0;
if (hasTools && hasToolCalls(fullText)) {
const { toolCalls, cleanText } = parseToolCalls(fullText);
toolCallsDetected = toolCalls.length;
log.recordToolCalls(toolCalls);
log.updateSummary({ toolCallsDetected: toolCalls.length });
for (const tc of toolCalls) {
output.push({
id: responsesItemId(),
@@ -1786,6 +1856,10 @@ async function handleResponsesNonStream(
}
res.json(buildResponseObject(respId, model, 'completed', output, usage));
log.recordRawResponse(fullText);
log.recordFinalResponse(fullText);
log.complete(fullText.length, toolCallsDetected > 0 ? 'tool_calls' : 'stop');
}
/**

View File

@@ -129,6 +129,7 @@ export interface AppConfig {
file_enabled: boolean; // 是否启用日志文件持久化
dir: string; // 日志文件存储目录
max_days: number; // 日志保留天数
persist_mode: 'compact' | 'full' | 'summary'; // 落盘模式: compact=精简, full=完整, summary=仅问答摘要
};
tools?: {
schemaMode: 'compact' | 'full' | 'names_only'; // Schema 呈现模式

View File

@@ -0,0 +1,136 @@
/**
* test/unit-log-persist-compact.mjs
*
* 回归测试compact 落盘模式应保留摘要信息,同时显著压缩 JSONL payload。
* 运行方式:npm run build && node test/unit-log-persist-compact.mjs
*/
import fs from 'fs';
import path from 'path';
const LOG_DIR = '/tmp/cursor2api-log-compact';
process.env.LOG_FILE_ENABLED = '1';
process.env.LOG_DIR = LOG_DIR;
process.env.LOG_PERSIST_MODE = 'compact';
const { createRequestLogger, clearAllLogs, getRequestPayload } = await import('../dist/logger.js');
let passed = 0;
let failed = 0;
// Minimal assertion helper: throw Error(msg) when the condition is falsy.
function assert(condition, msg) {
  if (condition) return;
  throw new Error(msg || 'Assertion failed');
}
function assertEqual(a, b, msg) {
const as = JSON.stringify(a);
const bs = JSON.stringify(b);
if (as !== bs) throw new Error(msg || `Expected ${bs}, got ${as}`);
}
// Reset test state: clear the in-memory logs and remove the JSONL directory.
function resetLogs() {
  clearAllLogs();
  fs.rmSync(LOG_DIR, { recursive: true, force: true });
}
// Read the newest .jsonl file in LOG_DIR and return its last record, parsed.
function latestPersistedRecord() {
  const files = fs.readdirSync(LOG_DIR).filter(name => name.endsWith('.jsonl')).sort();
  assert(files.length > 0, '应生成 JSONL 文件');
  const lastFile = path.join(LOG_DIR, files[files.length - 1]);
  const lines = fs.readFileSync(lastFile, 'utf8').split('\n').filter(Boolean);
  assert(lines.length > 0, 'JSONL 文件不应为空');
  return JSON.parse(lines[lines.length - 1]);
}
async function runTest(name, fn) {
try {
resetLogs();
await fn();
console.log(`${name}`);
passed++;
} catch (e) {
console.error(`${name}`);
console.error(` ${e.message}`);
failed++;
}
}
console.log('\n📦 [1] compact 落盘模式回归\n');

// Regression: feed oversized prompt / response / cursor-message / tool
// payloads through the logger, then verify the persisted (disk) record is
// truncated and de-duplicated while the in-memory payload keeps full text.
await runTest('磁盘 payload 应截断长文本并去掉重复 rawResponse', async () => {
  const hugePrompt = 'PROMPT-'.repeat(1200);
  const hugeResponse = 'RESPONSE-'.repeat(1600);
  const hugeCursor = 'CURSOR-'.repeat(900);
  const hugeToolDesc = 'DESC-'.repeat(500);
  const logger = createRequestLogger({
    method: 'POST',
    path: '/v1/chat/completions',
    model: 'gpt-4.1',
    stream: true,
    hasTools: true,
    toolCount: 1,
    messageCount: 1,
    apiFormat: 'openai',
  });
  logger.recordOriginalRequest({
    model: 'gpt-4.1',
    stream: true,
    temperature: 0.2,
    messages: [{ role: 'user', content: hugePrompt }],
    tools: [{
      type: 'function',
      function: {
        name: 'write_file',
        description: hugeToolDesc,
      },
    }],
  });
  logger.recordCursorRequest({
    model: 'anthropic/claude-sonnet-4.6',
    messages: [{
      role: 'user',
      parts: [{ type: 'text', text: hugeCursor }],
    }],
  });
  logger.recordToolCalls([{
    name: 'write_file',
    arguments: {
      path: '/tmp/demo.txt',
      content: 'X'.repeat(5000),
    },
  }]);
  logger.recordRawResponse(hugeResponse);
  logger.recordFinalResponse(hugeResponse);
  logger.complete(hugeResponse.length, 'stop');
  const persisted = latestPersistedRecord();
  const diskPayload = persisted.payload;
  const memoryPayload = getRequestPayload(persisted.summary.requestId);
  // In-memory copy must stay complete; only the disk copy is compacted.
  assert(memoryPayload, '内存 payload 应存在');
  assert(memoryPayload.rawResponse.length > diskPayload.finalResponse.length, '内存 payload 应保留完整文本');
  assertEqual(persisted.summary.status, 'success');
  assert(diskPayload.finalResponse.length < hugeResponse.length, '落盘 finalResponse 应被截断');
  assert(diskPayload.finalResponse.includes('...[截断 '), '落盘 finalResponse 应标记截断');
  assertEqual(diskPayload.rawResponse, undefined, 'rawResponse 与 finalResponse 相同,应省略落盘 rawResponse');
  assert(diskPayload.messages[0].contentPreview.length < hugePrompt.length, '落盘消息预览应被截断');
  assert(diskPayload.messages[0].contentPreview.includes('...[截断 '), '落盘消息预览应标记截断');
  assert(diskPayload.cursorMessages[0].contentPreview.length < hugeCursor.length, '落盘 Cursor 消息应被截断');
  assert(diskPayload.tools[0].description.length < hugeToolDesc.length, '落盘工具描述应被截断');
  assert(diskPayload.originalRequest.messageCount === 1, '落盘 originalRequest 应转为精简 meta');
  assertEqual(Array.isArray(diskPayload.originalRequest.messages), false, '落盘 originalRequest 不应保留完整 messages 数组');
  const compactToolCalls = JSON.stringify(diskPayload.toolCalls);
  assert(compactToolCalls.length < JSON.stringify(memoryPayload.toolCalls).length, '落盘 toolCalls 应被递归压缩');
});

console.log('\n' + '═'.repeat(55));
console.log(`   结果: ${passed} 通过 / ${failed} 失败 / ${passed + failed} 总计`);
console.log('═'.repeat(55) + '\n');
if (failed > 0) process.exit(1);

View File

@@ -0,0 +1,131 @@
/**
* test/unit-log-persist-default-summary.mjs
*
* 回归测试:未显式设置 LOG_PERSIST_MODE / logging.persist_mode 时,
* 默认落盘模式应为 summary。
* 运行方式:npm run build && node test/unit-log-persist-default-summary.mjs
*/
import fs from 'fs';
import path from 'path';
const LOG_DIR = '/tmp/cursor2api-log-default-summary';
process.env.LOG_FILE_ENABLED = '1';
process.env.LOG_DIR = LOG_DIR;
delete process.env.LOG_PERSIST_MODE;
const { handleOpenAIChatCompletions } = await import('../dist/openai-handler.js');
const { clearAllLogs } = await import('../dist/logger.js');
let passed = 0;
let failed = 0;
// Minimal assertion helper: throw Error(msg) when the condition is falsy.
function assert(condition, msg) {
  if (condition) return;
  throw new Error(msg || 'Assertion failed');
}
// Build a fetch Response whose body is a Cursor-style SSE stream emitting one
// `text-delta` event per entry in `deltas`, then closing.
function createCursorSseResponse(deltas) {
  const encoder = new TextEncoder();
  const body = new ReadableStream({
    start(controller) {
      for (const delta of deltas) {
        const event = JSON.stringify({ type: 'text-delta', delta });
        controller.enqueue(encoder.encode(`data: ${event}\n\n`));
      }
      controller.close();
    },
  });
  return new Response(body, {
    status: 200,
    headers: { 'Content-Type': 'text/event-stream' },
  });
}
// Minimal in-memory stand-in for Node's http.ServerResponse.
class MockResponse {
  statusCode = 200;
  headers = {};
  body = '';
  ended = false;

  // Records the status code and merges in any extra headers (new object,
  // so previously captured header references stay untouched).
  writeHead(code, extraHeaders) {
    this.statusCode = code;
    this.headers = { ...this.headers, ...extraHeaders };
  }

  write(data) {
    this.body += String(data);
    return true;
  }

  end(data = '') {
    this.body += String(data);
    this.ended = true;
  }

  // JSON convenience: sets the content type and ends with the serialised body.
  json(payload) {
    this.writeHead(this.statusCode, { 'Content-Type': 'application/json' });
    this.end(JSON.stringify(payload));
  }

  // Chainable express-style status setter.
  status(code) {
    this.statusCode = code;
    return this;
  }
}
// Returns the harness to a clean slate: in-memory logs, then the JSONL dir.
function resetLogs() {
  clearAllLogs();
  fs.rmSync(LOG_DIR, { force: true, recursive: true });
}
// Reads the newest JSONL log file and returns its last record, parsed.
function latestPersistedRecord() {
  const jsonlFiles = fs
    .readdirSync(LOG_DIR)
    .filter((name) => name.endsWith('.jsonl'))
    .sort();
  assert(jsonlFiles.length > 0, '应生成 JSONL 文件');
  const newestFile = path.join(LOG_DIR, jsonlFiles[jsonlFiles.length - 1]);
  const records = fs.readFileSync(newestFile, 'utf8').split('\n').filter(Boolean);
  assert(records.length > 0, 'JSONL 不应为空');
  return JSON.parse(records[records.length - 1]);
}
// Runs one isolated case: reset state, execute `fn`, then tally and report.
async function runTest(name, fn) {
  try {
    resetLogs();
    await fn();
  } catch (e) {
    console.error(`✗ ${name}`);
    console.error(`  ${e.message}`);
    failed++;
    return;
  }
  console.log(`✓ ${name}`);
  passed++;
}
console.log('\n📦 [1] 默认落盘模式为 summary 回归\n');
// With LOG_PERSIST_MODE deliberately unset (see top of file), a successful
// chat completion should persist only the question/answer summary payload.
await runTest('未显式配置 persist_mode 时默认只保留问答摘要', async () => {
  // Stub the upstream Cursor endpoint with a small two-chunk SSE reply.
  const originalFetch = global.fetch;
  global.fetch = async () => createCursorSseResponse(['Hello', ' world']);
  try {
    const req = {
      method: 'POST',
      path: '/v1/chat/completions',
      body: {
        model: 'gpt-4.1',
        stream: true,
        messages: [{ role: 'user', content: 'Please greet me briefly.' }],
      },
    };
    const res = new MockResponse();
    await handleOpenAIChatCompletions(req, res);
    const persisted = latestPersistedRecord();
    // Summary mode keeps the Q/A pair...
    assert(persisted.payload.question.includes('Please greet me briefly.'), '默认模式应保留 question');
    assert(persisted.payload.answer.includes('Hello world'), '默认模式应保留 answer');
    // ...and must drop the heavyweight fields that compact/full would keep.
    assert(persisted.payload.finalResponse === undefined, '默认模式不应保留 finalResponse');
    assert(persisted.payload.messages === undefined, '默认模式不应保留 messages');
  } finally {
    // Always restore the real fetch, even when an assertion throws.
    global.fetch = originalFetch;
  }
});
// Final tally; a non-zero exit code makes CI fail on any regression.
console.log('\n' + '═'.repeat(55));
console.log(` 结果: ${passed} 通过 / ${failed} 失败 / ${passed + failed} 总计`);
console.log('═'.repeat(55) + '\n');
if (failed > 0) process.exit(1);

View File

@@ -0,0 +1,259 @@
/**
 * test/unit-openai-log-persistence.mjs
 *
 * Regression test: successful OpenAI Chat / Responses requests must update
 * the in-memory request summary and persist a JSONL record to disk.
 * Run with: npm run build && node test/unit-openai-log-persistence.mjs
 */
import fs from 'fs';
import path from 'path';
const LOG_DIR = '/tmp/cursor2api-openai-log-persistence';
// NOTE(review): set before the dynamic import()s below — the handler/logger
// modules presumably read the environment when they initialise.
process.env.LOG_FILE_ENABLED = '1';
process.env.LOG_DIR = LOG_DIR;
const { handleOpenAIChatCompletions, handleOpenAIResponses } = await import('../dist/openai-handler.js');
const { clearAllLogs, getRequestSummaries } = await import('../dist/logger.js');
// Pass/fail tallies reported (and turned into the exit code) at script end.
let passed = 0;
let failed = 0;
// Throws an Error carrying `msg` (or a generic default) when `condition` is falsy.
function assert(condition, msg) {
  if (condition) return;
  throw new Error(msg ? msg : 'Assertion failed');
}
// Deep-compares two values via their JSON serialisation; throws on mismatch.
function assertEqual(a, b, msg) {
  const actualJson = JSON.stringify(a);
  const expectedJson = JSON.stringify(b);
  if (actualJson === expectedJson) return;
  throw new Error(msg || `Expected ${expectedJson}, got ${actualJson}`);
}
// Fabricates a Cursor-style SSE upstream reply: one `text-delta` frame per
// entry in `deltas`, streamed over the Response body.
function createCursorSseResponse(deltas) {
  const encoder = new TextEncoder();
  const stream = new ReadableStream({
    start(controller) {
      deltas
        .map((delta) => JSON.stringify({ type: 'text-delta', delta }))
        .forEach((payload) => controller.enqueue(encoder.encode(`data: ${payload}\n\n`)));
      controller.close();
    },
  });
  return new Response(stream, {
    status: 200,
    headers: { 'Content-Type': 'text/event-stream' },
  });
}
// Captures everything the handlers would write to an http.ServerResponse.
class MockResponse {
  statusCode = 200;
  headers = {};
  body = '';
  ended = false;

  // Stores the status and merges headers into a fresh object.
  writeHead(code, extraHeaders) {
    this.statusCode = code;
    this.headers = { ...this.headers, ...extraHeaders };
  }

  write(piece) {
    this.body += String(piece);
    return true;
  }

  end(piece = '') {
    this.body += String(piece);
    this.ended = true;
  }

  // Serialises `payload` as the JSON body and closes the response.
  json(payload) {
    this.writeHead(this.statusCode, { 'Content-Type': 'application/json' });
    this.end(JSON.stringify(payload));
  }

  // express-style chainable status setter.
  status(code) {
    this.statusCode = code;
    return this;
  }
}
// Clears the in-memory log store and wipes the on-disk JSONL directory.
function resetLogs() {
  clearAllLogs();
  fs.rmSync(LOG_DIR, { force: true, recursive: true });
}
// Parses every JSONL record persisted under LOG_DIR, oldest file first.
function readPersistedRecords() {
  if (!fs.existsSync(LOG_DIR)) return [];
  const jsonlFiles = fs
    .readdirSync(LOG_DIR)
    .filter((name) => name.endsWith('.jsonl'))
    .sort();
  return jsonlFiles.flatMap((file) =>
    fs.readFileSync(path.join(LOG_DIR, file), 'utf8')
      .split('\n')
      .filter(Boolean)
      .map((line) => JSON.parse(line)),
  );
}
// Most recent in-memory request summary (summaries come back newest-first).
function latestSummary() {
  const [newest] = getRequestSummaries(10);
  return newest;
}
// Temporarily replaces global.fetch with a canned Cursor SSE response for
// the duration of `fn`, restoring the real fetch afterwards.
async function withMockCursor(deltas, fn) {
  const realFetch = global.fetch;
  global.fetch = async () => createCursorSseResponse(deltas);
  try {
    await fn();
  } finally {
    global.fetch = realFetch;
  }
}
// Executes one isolated test: reset shared state, run `fn`, tally the result.
async function runTest(name, fn) {
  try {
    resetLogs();
    await fn();
  } catch (e) {
    console.error(`✗ ${name}`);
    console.error(`  ${e.message}`);
    failed++;
    return;
  }
  console.log(`✓ ${name}`);
  passed++;
}
console.log('\n📦 [1] OpenAI 成功请求日志持久化回归\n');
// The four cases below cover the 2×2 matrix of endpoint (chat vs responses)
// and stream flag; each must complete the summary AND write a JSONL record.
await runTest('Chat Completions stream=true 会完成 summary 并落盘', async () => {
  await withMockCursor(['Hello', ' world'], async () => {
    const req = {
      method: 'POST',
      path: '/v1/chat/completions',
      body: {
        model: 'gpt-4.1',
        stream: true,
        messages: [{ role: 'user', content: 'Say hello' }],
      },
    };
    const res = new MockResponse();
    await handleOpenAIChatCompletions(req, res);
    assert(res.ended, '响应应结束');
    // In-memory summary must be completed with success metadata.
    const summary = latestSummary();
    assert(summary, '应生成 summary');
    assertEqual(summary.path, '/v1/chat/completions');
    assertEqual(summary.stream, true);
    assertEqual(summary.status, 'success');
    assert(summary.responseChars > 0, 'responseChars 应大于 0');
    // The same request must also be findable in the persisted JSONL by id.
    const records = readPersistedRecords();
    const persisted = records.find(r => r.summary?.requestId === summary.requestId);
    assert(persisted, '应写入 JSONL');
    assertEqual(persisted.summary.status, 'success');
    assertEqual(persisted.summary.stream, true);
  });
});
await runTest('Chat Completions stream=false 会完成 summary 并落盘', async () => {
  await withMockCursor(['Hello', ' world'], async () => {
    const req = {
      method: 'POST',
      path: '/v1/chat/completions',
      body: {
        model: 'gpt-4.1',
        stream: false,
        messages: [{ role: 'user', content: 'Say hello' }],
      },
    };
    const res = new MockResponse();
    await handleOpenAIChatCompletions(req, res);
    assert(res.ended, '响应应结束');
    const summary = latestSummary();
    assert(summary, '应生成 summary');
    assertEqual(summary.path, '/v1/chat/completions');
    assertEqual(summary.stream, false);
    assertEqual(summary.status, 'success');
    assert(summary.responseChars > 0, 'responseChars 应大于 0');
    const records = readPersistedRecords();
    const persisted = records.find(r => r.summary?.requestId === summary.requestId);
    assert(persisted, '应写入 JSONL');
    assertEqual(persisted.summary.status, 'success');
    assertEqual(persisted.summary.stream, false);
  });
});
// /v1/responses takes `input` instead of `messages` and must additionally be
// tagged with apiFormat === 'responses'.
await runTest('Responses stream=true 会完成 summary 并落盘', async () => {
  await withMockCursor(['Hello', ' world'], async () => {
    const req = {
      method: 'POST',
      path: '/v1/responses',
      body: {
        model: 'gpt-4.1',
        stream: true,
        input: 'Say hello',
      },
    };
    const res = new MockResponse();
    await handleOpenAIResponses(req, res);
    assert(res.ended, '响应应结束');
    const summary = latestSummary();
    assert(summary, '应生成 summary');
    assertEqual(summary.path, '/v1/responses');
    assertEqual(summary.stream, true);
    assertEqual(summary.apiFormat, 'responses');
    assertEqual(summary.status, 'success');
    assert(summary.responseChars > 0, 'responseChars 应大于 0');
    const records = readPersistedRecords();
    const persisted = records.find(r => r.summary?.requestId === summary.requestId);
    assert(persisted, '应写入 JSONL');
    assertEqual(persisted.summary.status, 'success');
    assertEqual(persisted.summary.stream, true);
  });
});
await runTest('Responses stream=false 会完成 summary 并落盘', async () => {
  await withMockCursor(['Hello', ' world'], async () => {
    const req = {
      method: 'POST',
      path: '/v1/responses',
      body: {
        model: 'gpt-4.1',
        stream: false,
        input: 'Say hello',
      },
    };
    const res = new MockResponse();
    await handleOpenAIResponses(req, res);
    assert(res.ended, '响应应结束');
    const summary = latestSummary();
    assert(summary, '应生成 summary');
    assertEqual(summary.path, '/v1/responses');
    assertEqual(summary.stream, false);
    assertEqual(summary.apiFormat, 'responses');
    assertEqual(summary.status, 'success');
    assert(summary.responseChars > 0, 'responseChars 应大于 0');
    const records = readPersistedRecords();
    const persisted = records.find(r => r.summary?.requestId === summary.requestId);
    assert(persisted, '应写入 JSONL');
    assertEqual(persisted.summary.status, 'success');
    assertEqual(persisted.summary.stream, false);
  });
});
// Final tally; a non-zero exit code makes CI fail on any regression.
console.log('\n' + '═'.repeat(55));
console.log(` 结果: ${passed} 通过 / ${failed} 失败 / ${passed + failed} 总计`);
console.log('═'.repeat(55) + '\n');
if (failed > 0) process.exit(1);

View File

@@ -0,0 +1,177 @@
/**
 * test/unit-openai-log-summary.mjs
 *
 * Regression test: in "summary" persist mode only the Q/A digest plus a
 * little metadata is written to disk — no full messages/responses.
 * Run with: npm run build && node test/unit-openai-log-summary.mjs
 */
import fs from 'fs';
import path from 'path';
const LOG_DIR = '/tmp/cursor2api-openai-log-summary';
// NOTE(review): must be set before the dynamic import()s below — the modules
// presumably capture the environment during initialisation.
process.env.LOG_FILE_ENABLED = '1';
process.env.LOG_DIR = LOG_DIR;
// Force the mode under test explicitly (the companion test covers the default).
process.env.LOG_PERSIST_MODE = 'summary';
const { handleOpenAIChatCompletions, handleOpenAIResponses } = await import('../dist/openai-handler.js');
const { clearAllLogs, getRequestSummaries } = await import('../dist/logger.js');
// Pass/fail tallies reported (and turned into the exit code) at script end.
let passed = 0;
let failed = 0;
// Guard-clause assertion: no-op when truthy, otherwise throws `msg`/default.
function assert(condition, msg) {
  if (condition) return;
  throw new Error(msg ? msg : 'Assertion failed');
}
// Structural equality via JSON round-trip; throws a descriptive Error on mismatch.
function assertEqual(a, b, msg) {
  const actualJson = JSON.stringify(a);
  const expectedJson = JSON.stringify(b);
  if (actualJson === expectedJson) return;
  throw new Error(msg || `Expected ${expectedJson}, got ${actualJson}`);
}
// Emits each delta as a Cursor-style `text-delta` SSE frame over a
// ReadableStream-backed Response.
function createCursorSseResponse(deltas) {
  const encoder = new TextEncoder();
  const frames = deltas.map(
    (delta) => `data: ${JSON.stringify({ type: 'text-delta', delta })}\n\n`,
  );
  const body = new ReadableStream({
    start(controller) {
      for (const frame of frames) controller.enqueue(encoder.encode(frame));
      controller.close();
    },
  });
  return new Response(body, {
    status: 200,
    headers: { 'Content-Type': 'text/event-stream' },
  });
}
// Records status/headers/body writes in memory, mimicking http.ServerResponse.
class MockResponse {
  statusCode = 200;
  headers = {};
  body = '';
  ended = false;

  writeHead(code, extraHeaders) {
    this.statusCode = code;
    this.headers = { ...this.headers, ...extraHeaders };
  }

  write(fragment) {
    this.body += String(fragment);
    return true;
  }

  end(fragment = '') {
    this.body += String(fragment);
    this.ended = true;
  }

  // Writes a JSON body with the matching content type and closes the response.
  json(payload) {
    this.writeHead(this.statusCode, { 'Content-Type': 'application/json' });
    this.end(JSON.stringify(payload));
  }

  // Chainable status setter, express style.
  status(code) {
    this.statusCode = code;
    return this;
  }
}
// Drops all in-memory logs, then removes the on-disk JSONL directory.
function resetLogs() {
  clearAllLogs();
  fs.rmSync(LOG_DIR, { force: true, recursive: true });
}
// Returns the last JSON record of the newest JSONL file under LOG_DIR.
function latestPersistedRecord() {
  const jsonlFiles = fs
    .readdirSync(LOG_DIR)
    .filter((name) => name.endsWith('.jsonl'))
    .sort();
  assert(jsonlFiles.length > 0, '应生成 JSONL 文件');
  const newestFile = path.join(LOG_DIR, jsonlFiles[jsonlFiles.length - 1]);
  const records = fs.readFileSync(newestFile, 'utf8').split('\n').filter(Boolean);
  assert(records.length > 0, 'JSONL 不应为空');
  return JSON.parse(records[records.length - 1]);
}
// Newest in-memory request summary (list is ordered newest-first).
function latestSummary() {
  const [newest] = getRequestSummaries(10);
  return newest;
}
// Runs `fn` while global.fetch serves a canned Cursor SSE reply; the real
// fetch is restored in a finally block regardless of the outcome.
async function withMockCursor(deltas, fn) {
  const realFetch = global.fetch;
  global.fetch = async () => createCursorSseResponse(deltas);
  try {
    await fn();
  } finally {
    global.fetch = realFetch;
  }
}
// One isolated test case: reset shared state, run `fn`, record pass/fail.
async function runTest(name, fn) {
  try {
    resetLogs();
    await fn();
  } catch (e) {
    console.error(`✗ ${name}`);
    console.error(`  ${e.message}`);
    failed++;
    return;
  }
  console.log(`✓ ${name}`);
  passed++;
}
console.log('\n📦 [1] summary 落盘模式回归\n');
// Chat endpoint: summary mode must persist question/answer only and drop the
// heavyweight fields (messages / finalResponse / rawResponse).
await runTest('Chat Completions summary 模式只保留 question / answer', async () => {
  await withMockCursor(['Hello', ' world'], async () => {
    const req = {
      method: 'POST',
      path: '/v1/chat/completions',
      body: {
        model: 'gpt-4.1',
        stream: true,
        messages: [{ role: 'user', content: 'Please say hello in English.' }],
      },
    };
    const res = new MockResponse();
    await handleOpenAIChatCompletions(req, res);
    const summary = latestSummary();
    assert(summary, '应生成 summary');
    assertEqual(summary.status, 'success');
    const persisted = latestPersistedRecord();
    assertEqual(persisted.summary.path, '/v1/chat/completions');
    // The Q/A digest survives...
    assert(persisted.payload.question.includes('Please say hello'), '应保留用户问题摘要');
    assert(persisted.payload.answer.includes('Hello world'), '应保留模型回答摘要');
    assertEqual(persisted.payload.answerType, 'text');
    // ...while the bulky fields must be absent from the persisted payload.
    assertEqual(persisted.payload.messages, undefined, 'summary 模式不应保留 messages');
    assertEqual(persisted.payload.finalResponse, undefined, 'summary 模式不应保留 finalResponse');
    assertEqual(persisted.payload.rawResponse, undefined, 'summary 模式不应保留 rawResponse');
  });
});
// Responses endpoint: `input` (not `messages`) must still yield a Q/A digest,
// with request-side bulk (originalRequest / cursorMessages) stripped.
await runTest('Responses summary 模式也能提取 question / answer', async () => {
  await withMockCursor(['Hello', ' world'], async () => {
    const req = {
      method: 'POST',
      path: '/v1/responses',
      body: {
        model: 'gpt-4.1',
        stream: false,
        input: 'Please answer with a short hello.',
      },
    };
    const res = new MockResponse();
    await handleOpenAIResponses(req, res);
    const persisted = latestPersistedRecord();
    assertEqual(persisted.summary.path, '/v1/responses');
    assert(persisted.payload.question.includes('short hello'), 'Responses summary 模式应保留问题摘要');
    assert(persisted.payload.answer.includes('Hello world'), 'Responses summary 模式应保留回答摘要');
    assertEqual(persisted.payload.answerType, 'text');
    assertEqual(persisted.payload.originalRequest, undefined, 'summary 模式不应保留 originalRequest');
    assertEqual(persisted.payload.cursorMessages, undefined, 'summary 模式不应保留 cursorMessages');
  });
});
// Final tally; a non-zero exit code makes CI fail on any regression.
console.log('\n' + '═'.repeat(55));
console.log(` 结果: ${passed} 通过 / ${failed} 失败 / ${passed + failed} 总计`);
console.log('═'.repeat(55) + '\n');
if (failed > 0) process.exit(1);