diff --git a/README.md b/README.md
index c6c180c9..2d937797 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,7 @@ Prompt Optimizer是一个强大的AI提示词优化工具,帮助你编写更
 - 💾 隐私保护:本地加密存储历史记录和API密钥,确保数据安全
 - 📱 多端支持:同时提供Web应用和Chrome插件两种使用方式
 - 🎨 用户体验:简洁直观的界面设计,响应式布局和流畅交互动效
+- 🌐 跨域支持:Vercel部署时支持使用Edge Runtime代理解决跨域问题(可能会触发部分厂商风控)
 
 ## 🚀 快速开始
 
diff --git a/api/proxy.js b/api/proxy.js
new file mode 100644
index 00000000..b1346f2a
--- /dev/null
+++ b/api/proxy.js
@@ -0,0 +1,95 @@
+export const config = {
+  runtime: 'edge'
+};
+
+export default async function handler(req) {
+  // 处理CORS预检请求
+  if (req.method === 'OPTIONS') {
+    return new Response(null, {
+      status: 200,
+      headers: {
+        'Access-Control-Allow-Origin': '*',
+        'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
+        'Access-Control-Allow-Headers': 'Content-Type, Authorization, X-API-KEY',
+        'Access-Control-Max-Age': '86400',
+      },
+    });
+  }
+
+  try {
+    // 解析请求数据
+    const { searchParams } = new URL(req.url);
+    const targetUrl = searchParams.get('targetUrl');
+
+    if (!targetUrl) {
+      return new Response(JSON.stringify({ error: '缺少目标URL参数' }), {
+        status: 400,
+        headers: { 'Content-Type': 'application/json' },
+      });
+    }
+
+    // 确保targetUrl是有效的URL
+    let validTargetUrl;
+    try {
+      validTargetUrl = new URL(decodeURIComponent(targetUrl)).toString();
+      console.log('目标URL:', validTargetUrl);
+    } catch (error) {
+      return new Response(JSON.stringify({ error: `无效的目标URL: ${error.message}` }), {
+        status: 400,
+        headers: { 'Content-Type': 'application/json' },
+      });
+    }
+
+    // 准备请求头
+    const headers = new Headers();
+    req.headers.forEach((value, key) => {
+      // 排除一些特定的头,这些头可能会导致问题
+      if (!['host', 'connection', 'content-length'].includes(key.toLowerCase())) {
+        headers.set(key, value);
+      }
+    });
+
+    // 获取请求体
+    let body = null;
+    if (req.method !== 'GET' && req.method !== 'HEAD') {
+      body = await req.text();
+    }
+
+    // 发送请求到目标URL
+    const fetchResponse = await fetch(validTargetUrl, {
+      method: req.method,
+      headers,
+      body,
+    });
+
+    // 读取响应数据
+    const data = await fetchResponse.text();
+
+    // 创建响应头
+    const responseHeaders = new Headers();
+    fetchResponse.headers.forEach((value, key) => {
+      responseHeaders.set(key, value);
+    });
+
+    // 设置CORS头
+    responseHeaders.set('Access-Control-Allow-Origin', '*');
+    responseHeaders.set('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
+    responseHeaders.set('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-API-KEY');
+
+    // 返回响应
+    return new Response(data, {
+      status: fetchResponse.status,
+      statusText: fetchResponse.statusText,
+      headers: responseHeaders,
+    });
+  } catch (error) {
+    console.error('代理请求失败:', error);
+    return new Response(JSON.stringify({ error: `代理请求失败: ${error.message}` }), {
+      status: 500,
+      headers: {
+        'Content-Type': 'application/json',
+        'Access-Control-Allow-Origin': '*',
+      },
+    });
+  }
+}
\ No newline at end of file
diff --git a/api/stream.js b/api/stream.js
new file mode 100644
index 00000000..a697e09f
--- /dev/null
+++ b/api/stream.js
@@ -0,0 +1,155 @@
+// api/stream.js
+export const config = {
+  runtime: 'edge'
+};
+
+export default async function handler(req) {
+  console.log('流式代理请求开始处理:', new Date().toISOString());
+
+  // 处理CORS预检请求
+  if (req.method === 'OPTIONS') {
+    console.log('处理CORS预检请求');
+    return new Response(null, {
+      status: 200,
+      headers: {
+        'Access-Control-Allow-Origin': '*',
+        'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
+        'Access-Control-Allow-Headers': 'Content-Type, Authorization, X-API-KEY',
+        'Access-Control-Max-Age': '86400',
+      },
+    });
+  }
+
+  try {
+    // 解析请求数据
+    const { searchParams } = new URL(req.url);
+    const targetUrl = searchParams.get('targetUrl');
+
+    if (!targetUrl) {
+      console.error('缺少目标URL参数');
+      return new Response(JSON.stringify({ error: '缺少目标URL参数' }), {
+        status: 400,
+        headers: { 'Content-Type': 'application/json' },
+      });
+    }
+
+    // 确保targetUrl是有效的URL
+    let validTargetUrl;
+    try {
+      validTargetUrl = new URL(decodeURIComponent(targetUrl)).toString();
+      console.log('目标URL:', validTargetUrl);
+    } catch (error) {
+      console.error('无效的目标URL:', error);
+      return new Response(JSON.stringify({ error: `无效的目标URL: ${error.message}` }), {
+        status: 400,
+        headers: { 'Content-Type': 'application/json' },
+      });
+    }
+
+    // 准备请求头
+    const headers = new Headers();
+    req.headers.forEach((value, key) => {
+      // 排除一些特定的头,这些头可能会导致问题
+      if (!['host', 'connection', 'content-length'].includes(key.toLowerCase())) {
+        headers.set(key, value);
+      }
+    });
+    console.log('请求方法:', req.method);
+    console.log('请求头数量:', [...headers.keys()].length);
+
+    // 获取请求体
+    let body = null;
+    if (req.method !== 'GET' && req.method !== 'HEAD') {
+      body = await req.text();
+      console.log('请求体长度:', body?.length || 0);
+    }
+
+    console.log('开始向目标URL发送请求:', new Date().toISOString());
+    // 发送请求到目标URL
+    const fetchResponse = await fetch(validTargetUrl, {
+      method: req.method,
+      headers,
+      body,
+      duplex: 'half', // 支持流式请求
+    });
+    console.log('收到目标URL响应:', new Date().toISOString(), '状态码:', fetchResponse.status);
+
+    // 创建响应头
+    const responseHeaders = new Headers();
+    fetchResponse.headers.forEach((value, key) => {
+      responseHeaders.set(key, value);
+    });
+
+    // 设置CORS头
+    responseHeaders.set('Access-Control-Allow-Origin', '*');
+    responseHeaders.set('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
+    responseHeaders.set('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-API-KEY');
+
+    // 检查是否是SSE流
+    const contentType = fetchResponse.headers.get('content-type');
+    const isEventStream = contentType?.includes('text/event-stream');
+    console.log('响应内容类型:', contentType, '是否为SSE流:', isEventStream);
+
+    if (isEventStream) {
+      responseHeaders.set('Content-Type', 'text/event-stream');
+      responseHeaders.set('Cache-Control', 'no-cache');
+      responseHeaders.set('Connection', 'keep-alive');
+      // 确保不缓冲数据
+      responseHeaders.set('X-Accel-Buffering', 'no');
+    }
+
+    // 创建并返回流式响应,使用TransformStream确保数据立即传输
+    const { readable, writable } = new TransformStream();
+    console.log('创建TransformStream完成');
+
+    // 启动数据传输过程
+    (async () => {
+      const writer = writable.getWriter();
+      const reader = fetchResponse.body.getReader();
+
+      try {
+        console.log('开始流式传输数据:', new Date().toISOString());
+        let chunkCount = 0;
+        let totalBytes = 0;
+
+        while (true) {
+          const { done, value } = await reader.read();
+          if (done) {
+            console.log('流式传输完成:', new Date().toISOString());
+            console.log(`总共传输 ${chunkCount} 个数据块,${totalBytes} 字节`);
+            await writer.close();
+            break;
+          }
+
+          // 立即写入数据并刷新
+          await writer.write(value);
+          chunkCount++;
+          totalBytes += value.length;
+
+          if (chunkCount % 10 === 0) {
+            console.log(`已传输 ${chunkCount} 个数据块,${totalBytes} 字节`);
+          }
+        }
+      } catch (error) {
+        console.error('流式传输错误:', error);
+        writer.abort(error);
+      }
+    })();
+
+    console.log('返回流式响应');
+    return new Response(readable, {
+      status: fetchResponse.status,
+      statusText: fetchResponse.statusText,
+      headers: responseHeaders,
+    });
+  } catch (error) {
+    console.error('流式代理请求失败:', error);
+    return new Response(JSON.stringify({ error: `流式代理请求失败: ${error.message}` }), {
+      status: 500,
+      headers: {
+        'Content-Type': 'application/json',
+        'Access-Control-Allow-Origin': '*',
+      },
+    });
+  }
+}
\ No newline at end of file
diff --git a/api/vercel-status.js b/api/vercel-status.js
new file mode 100644
index 00000000..f433969e
--- /dev/null
+++ b/api/vercel-status.js
@@ -0,0 +1,23 @@
+export const config = {
+  runtime: 'edge'
+};
+
+export default async function handler(request) {
+  return new Response(
+    JSON.stringify({
+      status: 'available',
+      environment: 'vercel',
+      proxySupport: true,
+      version: '1.0.0'
+    }),
+    {
+      status: 200,
+      headers: {
+        'Content-Type': 'application/json',
+        'Access-Control-Allow-Origin': '*',
+        'Access-Control-Allow-Methods': 'GET, OPTIONS',
+        'Access-Control-Allow-Headers': 'Content-Type'
+      }
+    }
+  );
+}
\ No newline at end of file
diff --git a/dev.md b/dev.md
index c87abc0b..d1edf0d4 100644
--- a/dev.md
+++ b/dev.md
@@ -51,7 +51,7 @@ docker build -t linshen/prompt-optimizer:$VERSION .
 docker tag linshen/prompt-optimizer:$VERSION linshen/prompt-optimizer:latest
 
 # 运行容器
-docker run -d -p 80:80 --restart unless-stopped --name prompt-optimizer linshen/prompt-optimizer
+docker run -d -p 80:80 --restart unless-stopped --name prompt-optimizer linshen/prompt-optimizer:$VERSION
 
 
 # 推送
@@ -136,30 +136,6 @@ pnpm build:web
 pnpm build:ext
 ```
 
-### CI/CD集成
-
-```yaml
-# GitHub Actions示例
-name: Docker Build and Push
-
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    branches: [ main ]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - name: Build and push
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          tags: prompt-optimizer:latest
-```
-
 ### 常用Docker命令
 
 ```bash
diff --git a/docs/technical-development-guide.md b/docs/technical-development-guide.md
index a52234ec..d86d787c 100644
--- a/docs/technical-development-guide.md
+++ b/docs/technical-development-guide.md
@@ -464,4 +464,53 @@
 - iOS Safari >= 14
 - Android Chrome >= 90
 
+## 跨域代理解决方案
+
+为了解决在纯前端应用中调用第三方LLM API时可能遇到的跨域问题,我们实现了一个基于Vercel Edge Runtime的代理解决方案。
+
+### 代理架构
+
+1. **API代理**:用于处理普通请求
+   - 路径:`/api/proxy`
+   - 功能:转发普通HTTP请求,处理CORS头
+
+2. **流式代理**:用于处理流式请求
+   - 路径:`/api/stream`
+   - 功能:转发流式响应,保持连接直到流结束
+
+### 工作原理
+
+1. 在生产环境中(非localhost),系统会自动检测是否需要使用代理
+2. 所有API请求(包括OpenAI)都可以使用代理,通过模型配置中的`useVercelProxy`选项控制
+3. 代理会保留原始请求的所有头信息和请求体
+4. 代理会添加必要的CORS头,允许浏览器接收响应
+
+### 代码实现
+
+核心代理逻辑位于:
+- `/api/proxy.js`:处理普通请求
+- `/api/stream.js`:处理流式请求
+
+环境检测逻辑位于:
+- `packages/core/src/utils/environment.ts`
+
+### 使用方式
+
+对于开发者来说,这个功能是透明的,不需要额外配置。系统会自动检测Vercel环境并在模型配置中提供代理选项。
+
+在模型配置界面中,当检测到Vercel环境时,会显示"使用Vercel代理"的选项。您可以为每个模型单独配置是否启用代理功能。
+
+
+### 安全考虑
+
+1. 代理仅转发请求,不存储任何数据
+2. API密钥仍然由客户端直接发送,不经过中间服务器处理
+3. 所有请求都通过HTTPS加密传输
+
+### 限制
+
+1. Vercel Edge Functions有30秒的超时限制
+2. 有每月带宽和请求数量限制
+3. 首次请求可能有冷启动延迟
+
 最后更新:2024-03-02
\ No newline at end of file
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index b7024b1d..331cd688 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -24,4 +24,13 @@ export * from './services/model/defaults'
 // 导出提示词服务相关
 export { PromptService, createPromptService } from './services/prompt/service'
 export * from './services/prompt/types'
-export * from './services/prompt/errors'
\ No newline at end of file
+export * from './services/prompt/errors'
+
+// 导出环境工具函数
+export {
+  isBrowser,
+  isVercel,
+  getProxyUrl,
+  checkVercelApiAvailability,
+  resetVercelStatusCache
+} from './utils/environment';
\ No newline at end of file
diff --git a/packages/core/src/services/llm/service.ts b/packages/core/src/services/llm/service.ts
index ff1a3523..f5274fde 100644
--- a/packages/core/src/services/llm/service.ts
+++ b/packages/core/src/services/llm/service.ts
@@ -4,6 +4,7 @@ import { ModelManager, modelManager as defaultModelManager } from '../model/mana
 import { APIError, RequestConfigError, ERROR_MESSAGES } from './errors';
 import OpenAI from 'openai';
 import { GoogleGenerativeAI, GenerativeModel } from '@google/generative-ai';
+import { isVercel, getProxyUrl } from '../../utils/environment';
 /**
  * LLM服务实现 - 基于官方SDK
  */
@@ -60,8 +61,8 @@
   /**
    * 获取OpenAI实例
    */
-  private getOpenAIInstance(modelConfig: ModelConfig): OpenAI {
-    const cacheKey = `${modelConfig.provider}-${modelConfig.defaultModel}`;
+  private getOpenAIInstance(modelConfig: ModelConfig, isStream: boolean = false): OpenAI {
+    const cacheKey = `${modelConfig.provider}-${modelConfig.defaultModel}-${isStream ? 'stream' : 'normal'}`;
 
     if (this.openAIInstances.has(cacheKey)) {
       return this.openAIInstances.get(cacheKey)!;
@@ -75,11 +76,29 @@
       processedBaseURL = processedBaseURL.slice(0, -'/chat/completions'.length);
     }
 
-    const instance = new OpenAI({
+    // 使用代理处理跨域问题
+    let finalBaseURL = processedBaseURL;
+    // 如果模型配置启用了Vercel代理且当前环境是Vercel,则使用代理
+    // 允许所有API包括OpenAI使用代理
+    if (modelConfig.useVercelProxy === true && isVercel() && processedBaseURL) {
+      finalBaseURL = getProxyUrl(processedBaseURL, isStream);
+      console.log(`使用${isStream ? '流式' : ''}API代理:`, finalBaseURL);
+    }
+
+    // 创建OpenAI实例配置
+    const config: any = {
       apiKey: apiKey,
-      baseURL: processedBaseURL,
+      baseURL: finalBaseURL,
       dangerouslyAllowBrowser: true
-    });
+    };
+
+    // 为流式请求添加额外配置
+    if (isStream) {
+      config.timeout = 30000; // 添加更短的超时时间,避免长时间等待
+      config.maxRetries = 2; // 添加更积极的重试策略
+    }
+
+    const instance = new OpenAI(config);
 
     this.openAIInstances.set(cacheKey, instance);
     return instance;
@@ -88,27 +107,34 @@
   /**
    * 获取Gemini实例
    */
-  private getGeminiModel(modelConfig: ModelConfig, systemInstruction?: string): GenerativeModel {
+  private getGeminiModel(modelConfig: ModelConfig, systemInstruction?: string, isStream: boolean = false): GenerativeModel {
     const apiKey = modelConfig.apiKey || '';
     const genAI = new GoogleGenerativeAI(apiKey);
 
     // 创建模型配置
     const modelOptions: any = {
-      model: modelConfig.defaultModel,
-      generationConfig: {
-        maxOutputTokens: 2000,
-        temperature: 0.7,
-        topK: 40,
-        topP: 0.95,
-      }
+      model: modelConfig.defaultModel
     };
 
     // 如果有系统指令,添加到模型配置中
     if (systemInstruction) {
       modelOptions.systemInstruction = systemInstruction;
    }
-
-    return genAI.getGenerativeModel(modelOptions);
+
+    // 处理baseURL,如果以'/v1beta'结尾则去掉
+    let processedBaseURL = modelConfig.baseURL;
+    if (processedBaseURL?.endsWith('/v1beta')) {
+      processedBaseURL = processedBaseURL.slice(0, -'/v1beta'.length);
+    }
+    // 使用代理处理跨域问题
+    let finalBaseURL = processedBaseURL;
+    // 如果模型配置启用了Vercel代理且当前环境是Vercel,则使用代理
+    // 允许所有API包括OpenAI使用代理
+    if (modelConfig.useVercelProxy === true && isVercel() && processedBaseURL) {
+      finalBaseURL = getProxyUrl(processedBaseURL, isStream);
+      console.log(`使用${isStream ? '流式' : ''}API代理:`, finalBaseURL);
+    }
+    return genAI.getGenerativeModel(modelOptions, {"baseUrl": finalBaseURL});
   }
@@ -143,7 +169,7 @@
       : '';
 
     // 获取带有系统指令的模型实例
-    const model = this.getGeminiModel(modelConfig, systemInstruction);
+    const model = this.getGeminiModel(modelConfig, systemInstruction, false);
 
     // 过滤出用户和助手消息
     const conversationMessages = messages.filter(msg => msg.role !== 'system');
@@ -281,7 +307,8 @@
     modelConfig: ModelConfig,
     callbacks: StreamHandlers
   ): Promise<void> {
-    const openai = this.getOpenAIInstance(modelConfig);
+    // 获取流式OpenAI实例
+    const openai = this.getOpenAIInstance(modelConfig, true);
 
     const formattedMessages = messages.map(msg => ({
       role: msg.role,
@@ -289,6 +316,7 @@
     }));
 
     try {
+      console.log('开始创建流式请求...');
      const stream = await openai.chat.completions.create({
        model: modelConfig.defaultModel,
        messages: formattedMessages,
@@ -298,7 +326,7 @@
      });
 
      console.log('成功获取到流式响应');
-      
+
      for await (const chunk of stream) {
        const content = chunk.choices[0]?.delta?.content || '';
        if (content) {
@@ -306,8 +334,8 @@
            contentLength: content.length,
            content: content.substring(0, 50) + (content.length > 50 ? '...' : '')
          });
-          
-          await callbacks.onToken(content);
+
+          callbacks.onToken(content);
          // 添加小延迟,让UI有时间更新
          await new Promise(resolve => setTimeout(resolve, 10));
        }
@@ -337,7 +365,7 @@
       : '';
 
     // 获取带有系统指令的模型实例
-    const model = this.getGeminiModel(modelConfig, systemInstruction);
+    const model = this.getGeminiModel(modelConfig, systemInstruction, true);
 
     // 过滤出用户和助手消息
     const conversationMessages = messages.filter(msg => msg.role !== 'system');
@@ -360,10 +388,11 @@
     }
 
     try {
+      console.log('开始创建Gemini流式请求...');
       const result = await chat.sendMessageStream(lastUserMessage);
 
       console.log('成功获取到流式响应');
-      
+
       for await (const chunk of result.stream) {
         const text = chunk.text();
         if (text) {
diff --git a/packages/core/src/services/model/defaults.ts b/packages/core/src/services/model/defaults.ts
index f07da35f..74beb967 100644
--- a/packages/core/src/services/model/defaults.ts
+++ b/packages/core/src/services/model/defaults.ts
@@ -44,7 +44,7 @@ export const defaultModels: Record<string, ModelConfig> = {
   },
   gemini: {
     name: 'Gemini',
-    baseURL: 'https://generativelanguage.googleapis.com/v1beta',
+    baseURL: 'https://generativelanguage.googleapis.com',
     models: ['gemini-2.0-flash'],
     defaultModel: 'gemini-2.0-flash',
     apiKey: GEMINI_API_KEY,
diff --git a/packages/core/src/services/model/types.ts b/packages/core/src/services/model/types.ts
index 93a6e04d..24dd7bc5 100644
--- a/packages/core/src/services/model/types.ts
+++ b/packages/core/src/services/model/types.ts
@@ -16,6 +16,8 @@ export interface ModelConfig {
   enabled: boolean;
   /** 提供商 */
   provider: 'deepseek' | 'gemini' | 'custom' | string;
+  /** 是否使用Vercel代理 */
+  useVercelProxy?: boolean;
 }
 
 /**
diff --git a/packages/core/src/utils/environment.ts b/packages/core/src/utils/environment.ts
new file mode 100644
index 00000000..928bad60
--- /dev/null
+++ b/packages/core/src/utils/environment.ts
@@ -0,0 +1,102 @@
+/**
+ * 环境工具函数
+ */
+
+// 存储Vercel环境检测结果的缓存
+let vercelStatusCache: { available: boolean; checked: boolean } = {
+  available: false,
+  checked: false
+};
+
+/**
+ * 检查是否在浏览器环境中
+ */
+export const isBrowser = (): boolean => {
+  return typeof window !== 'undefined';
+};
+
+/**
+ * 检测Vercel API是否可用
+ * 使用异步方式检测,结果会被缓存
+ */
+export const checkVercelApiAvailability = async (): Promise<boolean> => {
+  // 如果已经检查过,直接返回缓存结果
+  if (vercelStatusCache.checked) {
+    return vercelStatusCache.available;
+  }
+
+  if (!isBrowser()) {
+    vercelStatusCache = { available: false, checked: true };
+    return false;
+  }
+
+  try {
+    // 获取当前域名作为基础URL
+    const origin = window.location.origin;
+    const response = await fetch(`${origin}/api/vercel-status`, {
+      method: 'GET',
+      headers: { 'Content-Type': 'application/json' },
+      // 设置较短的超时时间,避免长时间等待
+      signal: AbortSignal.timeout(3000)
+    });
+
+    // 检查响应状态,只有200状态码且内容proxySupport为true
+    if (response.status !== 200) {
+      vercelStatusCache = { available: false, checked: true };
+      console.log('[环境检测] 未检测到Vercel部署环境,代理功能不可用');
+      return false;
+    }
+
+    // 解析JSON响应
+    const data = await response.json();
+    const isAvailable = data.status === 'available' && data.proxySupport === true;
+
+    vercelStatusCache = { available: isAvailable, checked: true };
+
+    if (isAvailable) {
+      console.log('[环境检测] 检测到Vercel部署环境,代理功能可用');
+    } else {
+      console.log('[环境检测] 未检测到Vercel部署环境,代理功能不可用');
+    }
+
+    return isAvailable;
+  } catch (error) {
+    console.log('[环境检测] Vercel API检测失败', error);
+    vercelStatusCache = { available: false, checked: true };
+    return false;
+  }
+};
+
+/**
+ * 重置环境检测缓存
+ * 用于在需要重新检测时调用
+ */
+export const resetVercelStatusCache = (): void => {
+  vercelStatusCache = { available: false, checked: false };
+};
+
+/**
+ * 检查是否在Vercel环境中(同步版本,使用缓存结果)
+ */
+export const isVercel = (): boolean => {
+  // 如果未检查过,返回false,应用需要先调用异步检测方法
+  return vercelStatusCache.checked && vercelStatusCache.available;
+};
+
+/**
+ * 获取API代理URL
+ * @param baseURL 原始基础URL
+ * @param isStream 是否是流式请求
+ */
+export const getProxyUrl = (baseURL: string | undefined, isStream: boolean = false): string => {
+  if (!baseURL) {
+    return '';
+  }
+
+  // 获取当前域名作为基础URL
+  const origin = isBrowser() ? window.location.origin : '';
+  const proxyEndpoint = isStream ? 'stream' : 'proxy';
+
+  // 返回完整的绝对URL
+  return `${origin}/api/${proxyEndpoint}?targetUrl=${encodeURIComponent(baseURL)}`;
+};
\ No newline at end of file
diff --git a/packages/ui/src/components/ModelManager.vue b/packages/ui/src/components/ModelManager.vue
index cb2ad478..5b4e1f6c 100644
--- a/packages/ui/src/components/ModelManager.vue
+++ b/packages/ui/src/components/ModelManager.vue
@@ -117,6 +117,17 @@
             class="w-full px-4 py-2 rounded-xl bg-black/20 border border-purple-600/50 text-white placeholder-white/30 focus:ring-2 focus:ring-purple-500/50 focus:border-transparent transition-all"
             placeholder="输入新的 API 密钥(留空则保持不变)"/>
[11 added template lines lost during text extraction: the "使用Vercel代理" checkbox in the edit-model form, shown only when a Vercel deployment is detected]
@@ -188,6 +199,17 @@
             class="w-full px-4 py-2 rounded-xl bg-black/20 border border-purple-600/50 text-white placeholder-white/30 focus:ring-2 focus:ring-purple-500/50 focus:border-transparent transition-all"
             placeholder="输入 API 密钥"/>
[11 added template lines lost during text extraction: the "使用Vercel代理" checkbox in the add-model form, shown only when a Vercel deployment is detected]
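A note on how the new environment utilities fit together: `isVercel()` only reads the cached detection result, so `checkVercelApiAvailability()` must resolve once before the proxy flag has any effect, and `getProxyUrl` then rewrites a model's `baseURL` to go through the Edge proxy. The sketch below is not part of this diff; it assumes the exports are consumed from the core package (shown here as `@prompt-optimizer/core`, which may differ from the actual package name).

```typescript
// Hypothetical startup sketch (not part of this diff).
import {
  checkVercelApiAvailability,
  isVercel,
  getProxyUrl,
} from '@prompt-optimizer/core'; // assumed package name

async function initProxySupport(): Promise<void> {
  // Prime the cache first; isVercel() is synchronous and returns false
  // until checkVercelApiAvailability() has resolved at least once.
  await checkVercelApiAvailability();

  if (isVercel()) {
    // Example rewrite (values illustrative):
    // 'https://api.openai.com/v1' ->
    // '<origin>/api/stream?targetUrl=https%3A%2F%2Fapi.openai.com%2Fv1'
    const proxiedBaseURL = getProxyUrl('https://api.openai.com/v1', true);
    console.log('Proxied baseURL:', proxiedBaseURL);
  }
}

void initProxySupport();
```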
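For readers who want to exercise `/api/stream` without going through `LLMService`, a minimal sketch of a direct call follows. The endpoint path, the `targetUrl` query parameter, and the header-forwarding behaviour come from `api/stream.js` in this diff; the target URL, payload, and API key are placeholders.

```typescript
// Minimal sketch of calling the stream proxy directly (all values illustrative).
async function streamThroughProxy(): Promise<void> {
  const target = encodeURIComponent('https://api.openai.com/v1/chat/completions');
  const response = await fetch(`/api/stream?targetUrl=${target}`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      // Forwarded as-is to the target; the proxy does not read or store it.
      'Authorization': 'Bearer YOUR_API_KEY',
    },
    body: JSON.stringify({
      model: 'gpt-3.5-turbo',
      stream: true,
      messages: [{ role: 'user', content: 'Hello' }],
    }),
  });

  // The proxy pipes the upstream body through a TransformStream, so chunks
  // can be read incrementally as they arrive.
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    console.log(decoder.decode(value, { stream: true }));
  }
}
```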
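Finally, a sketch of what a model entry with the new `useVercelProxy` flag might look like. The field list follows `ModelConfig` as it appears in `types.ts` and `defaults.ts` in this diff; the concrete values and the import path are illustrative assumptions.

```typescript
import type { ModelConfig } from '@prompt-optimizer/core'; // assumed package name

// Illustrative custom model entry; only useVercelProxy is the new field.
const customModel: ModelConfig = {
  name: 'My OpenAI-compatible model',
  baseURL: 'https://api.example.com/v1',
  models: ['gpt-4o-mini'],
  defaultModel: 'gpt-4o-mini',
  apiKey: 'sk-REPLACE_ME',
  enabled: true,
  provider: 'custom',
  useVercelProxy: true, // route requests through /api/proxy or /api/stream on Vercel
};
```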