新增 Vercel 跨域代理支持和环境检测功能

- 在核心服务中添加 Vercel 环境检测和代理工具函数
- 更新 LLM 服务,支持通过 Vercel 代理解决跨域问题
- 扩展模型配置类型,新增 `useVercelProxy` 选项
- 在 ModelManager 组件中添加 Vercel 代理可用性检测和配置
- 更新技术开发指南,详细说明跨域代理解决方案
- 调整 Vercel 配置,支持 API 代理和环境变量设置
This commit is contained in:
linshen
2025-03-03 00:25:02 +08:00
parent 964a8676fc
commit bf9d98eb69
13 changed files with 553 additions and 59 deletions

View File

@@ -30,6 +30,7 @@ Prompt Optimizer是一个强大的AI提示词优化工具帮助你编写更
- 💾 隐私保护:本地加密存储历史记录和API密钥,确保数据安全
- 📱 多端支持:同时提供Web应用和Chrome插件两种使用方式
- 🎨 用户体验:简洁直观的界面设计,响应式布局和流畅交互动效
- 🌐 跨域支持:Vercel部署时支持使用Edge Runtime代理解决跨域问题(可能会触发部分厂商风控)
## 🚀 快速开始

95
api/proxy.js Normal file
View File

@@ -0,0 +1,95 @@
export const config = {
  runtime: 'edge'
};

// CORS headers attached to every response so the browser will accept it,
// including error responses (without these the front end cannot even read
// the error body on a cross-origin call).
const CORS_HEADERS = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
  'Access-Control-Allow-Headers': 'Content-Type, Authorization, X-API-KEY',
};

// Request headers that must not be forwarded upstream.
const EXCLUDED_REQUEST_HEADERS = ['host', 'connection', 'content-length'];

// Response headers that must not be copied back: fetch() has already
// decompressed the body, so the upstream content-encoding/content-length
// no longer describe the bytes we re-send — forwarding them corrupts the
// response on the client.
const EXCLUDED_RESPONSE_HEADERS = ['content-encoding', 'content-length', 'transfer-encoding', 'connection'];

// Build a JSON error response that the browser can read cross-origin.
function jsonError(message, status) {
  return new Response(JSON.stringify({ error: message }), {
    status,
    headers: { 'Content-Type': 'application/json', ...CORS_HEADERS },
  });
}

/**
 * Generic cross-origin proxy: forwards the incoming request to the URL in
 * the `targetUrl` query parameter and returns the upstream response with
 * CORS headers added, so a pure front-end app can call third-party APIs.
 *
 * NOTE(review): `targetUrl` is fully client-controlled (SSRF surface);
 * consider validating it against an allow-list of upstream hosts.
 *
 * @param {Request} req incoming Edge request
 * @returns {Promise<Response>} proxied response (or JSON error)
 */
export default async function handler(req) {
  // Answer CORS preflight directly, without contacting the upstream.
  if (req.method === 'OPTIONS') {
    return new Response(null, {
      status: 200,
      headers: { ...CORS_HEADERS, 'Access-Control-Max-Age': '86400' },
    });
  }

  try {
    // Extract and validate the target URL.
    const { searchParams } = new URL(req.url);
    const targetUrl = searchParams.get('targetUrl');
    if (!targetUrl) {
      return jsonError('缺少目标URL参数', 400);
    }

    let validTargetUrl;
    try {
      validTargetUrl = new URL(decodeURIComponent(targetUrl)).toString();
      console.log('目标URL:', validTargetUrl);
    } catch (error) {
      return jsonError(`无效的目标URL: ${error.message}`, 400);
    }

    // Copy request headers, dropping ones that would break forwarding.
    const headers = new Headers();
    req.headers.forEach((value, key) => {
      if (!EXCLUDED_REQUEST_HEADERS.includes(key.toLowerCase())) {
        headers.set(key, value);
      }
    });

    // GET/HEAD requests must not carry a body.
    const body = req.method !== 'GET' && req.method !== 'HEAD'
      ? await req.text()
      : null;

    // Forward to the upstream and read the (already decompressed) body.
    const fetchResponse = await fetch(validTargetUrl, {
      method: req.method,
      headers,
      body,
    });
    const data = await fetchResponse.text();

    // Copy response headers minus the ones invalidated by decompression,
    // then add CORS headers.
    const responseHeaders = new Headers();
    fetchResponse.headers.forEach((value, key) => {
      if (!EXCLUDED_RESPONSE_HEADERS.includes(key.toLowerCase())) {
        responseHeaders.set(key, value);
      }
    });
    Object.entries(CORS_HEADERS).forEach(([key, value]) => responseHeaders.set(key, value));

    return new Response(data, {
      status: fetchResponse.status,
      statusText: fetchResponse.statusText,
      headers: responseHeaders,
    });
  } catch (error) {
    console.error('代理请求失败:', error);
    return jsonError(`代理请求失败: ${error.message}`, 500);
  }
}

155
api/stream.js Normal file
View File

@@ -0,0 +1,155 @@
// api/stream.js
export const config = {
  runtime: 'edge'
};

/**
 * Streaming cross-origin proxy: forwards the incoming request to the URL in
 * the `targetUrl` query parameter and pipes the upstream response back chunk
 * by chunk, so SSE / streaming LLM responses reach the browser immediately.
 *
 * NOTE(review): `targetUrl` is fully client-controlled (SSRF surface);
 * consider validating it against an allow-list of upstream hosts.
 *
 * @param {Request} req incoming Edge request
 * @returns {Promise<Response>} streamed proxied response (or JSON error)
 */
export default async function handler(req) {
  console.log('流式代理请求开始处理:', new Date().toISOString());

  // Answer CORS preflight directly, without contacting the upstream.
  if (req.method === 'OPTIONS') {
    console.log('处理CORS预检请求');
    return new Response(null, {
      status: 200,
      headers: {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
        'Access-Control-Allow-Headers': 'Content-Type, Authorization, X-API-KEY',
        'Access-Control-Max-Age': '86400',
      },
    });
  }

  try {
    // Extract and validate the target URL.
    const { searchParams } = new URL(req.url);
    const targetUrl = searchParams.get('targetUrl');
    if (!targetUrl) {
      console.error('缺少目标URL参数');
      return new Response(JSON.stringify({ error: '缺少目标URL参数' }), {
        status: 400,
        headers: {
          'Content-Type': 'application/json',
          // CORS header so the browser can actually read this error.
          'Access-Control-Allow-Origin': '*',
        },
      });
    }

    let validTargetUrl;
    try {
      validTargetUrl = new URL(decodeURIComponent(targetUrl)).toString();
      console.log('目标URL:', validTargetUrl);
    } catch (error) {
      console.error('无效的目标URL:', error);
      return new Response(JSON.stringify({ error: `无效的目标URL: ${error.message}` }), {
        status: 400,
        headers: {
          'Content-Type': 'application/json',
          'Access-Control-Allow-Origin': '*',
        },
      });
    }

    // Copy request headers, dropping ones that would break forwarding.
    const headers = new Headers();
    req.headers.forEach((value, key) => {
      if (!['host', 'connection', 'content-length'].includes(key.toLowerCase())) {
        headers.set(key, value);
      }
    });
    console.log('请求方法:', req.method);
    console.log('请求头数量:', [...headers.keys()].length);

    // GET/HEAD requests must not carry a body.
    let body = null;
    if (req.method !== 'GET' && req.method !== 'HEAD') {
      body = await req.text();
      console.log('请求体长度:', body?.length || 0);
    }

    console.log('开始向目标URL发送请求:', new Date().toISOString());
    const fetchResponse = await fetch(validTargetUrl, {
      method: req.method,
      headers,
      body,
      duplex: 'half', // required when streaming a request body
    });
    console.log('收到目标URL响应:', new Date().toISOString(), '状态码:', fetchResponse.status);

    // Copy response headers. fetch() has already decompressed the body, so
    // the upstream content-encoding/content-length no longer describe the
    // bytes we re-send — forwarding them corrupts the response on the client.
    const responseHeaders = new Headers();
    fetchResponse.headers.forEach((value, key) => {
      if (!['content-encoding', 'content-length', 'transfer-encoding', 'connection'].includes(key.toLowerCase())) {
        responseHeaders.set(key, value);
      }
    });
    responseHeaders.set('Access-Control-Allow-Origin', '*');
    responseHeaders.set('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
    responseHeaders.set('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-API-KEY');

    // For SSE streams, force no-cache / no-buffering semantics.
    const contentType = fetchResponse.headers.get('content-type');
    const isEventStream = contentType?.includes('text/event-stream');
    console.log('响应内容类型:', contentType, '是否为SSE流:', isEventStream);
    if (isEventStream) {
      responseHeaders.set('Content-Type', 'text/event-stream');
      responseHeaders.set('Cache-Control', 'no-cache');
      responseHeaders.set('Connection', 'keep-alive');
      responseHeaders.set('X-Accel-Buffering', 'no'); // disable intermediary buffering
    }

    // Some responses (e.g. 204 No Content) carry no body at all; calling
    // getReader() on a null body would throw, so short-circuit here.
    if (!fetchResponse.body) {
      return new Response(null, {
        status: fetchResponse.status,
        statusText: fetchResponse.statusText,
        headers: responseHeaders,
      });
    }

    // Pump the upstream body through a TransformStream so each chunk is
    // flushed to the client as soon as it arrives.
    const { readable, writable } = new TransformStream();
    console.log('创建TransformStream完成');
    (async () => {
      const writer = writable.getWriter();
      const reader = fetchResponse.body.getReader();
      try {
        console.log('开始流式传输数据:', new Date().toISOString());
        let chunkCount = 0;
        let totalBytes = 0;
        while (true) {
          const { done, value } = await reader.read();
          if (done) {
            console.log('流式传输完成:', new Date().toISOString());
            console.log(`总共传输 ${chunkCount} 个数据块,${totalBytes} 字节`);
            await writer.close();
            break;
          }
          await writer.write(value);
          chunkCount++;
          totalBytes += value.length;
          if (chunkCount % 10 === 0) {
            console.log(`已传输 ${chunkCount} 个数据块,${totalBytes} 字节`);
          }
        }
      } catch (error) {
        console.error('流式传输错误:', error);
        // Best-effort abort; swallow secondary failures so they don't
        // surface as unhandled rejections.
        await writer.abort(error).catch(() => {});
      }
    })();

    console.log('返回流式响应');
    return new Response(readable, {
      status: fetchResponse.status,
      statusText: fetchResponse.statusText,
      headers: responseHeaders,
    });
  } catch (error) {
    console.error('流式代理请求失败:', error);
    return new Response(JSON.stringify({ error: `流式代理请求失败: ${error.message}` }), {
      status: 500,
      headers: {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*',
      },
    });
  }
}

23
api/vercel-status.js Normal file
View File

@@ -0,0 +1,23 @@
export const config = {
  runtime: 'edge'
};

/**
 * Deployment-status endpoint. The front end probes this route to decide
 * whether the app is running on Vercel and the Edge proxy functions are
 * available (and therefore whether to offer the "use Vercel proxy" option).
 *
 * @param {Request} request incoming request (unused)
 * @returns {Promise<Response>} static JSON status payload with CORS headers
 */
export default async function handler(request) {
  const payload = {
    status: 'available',
    environment: 'vercel',
    proxySupport: true,
    version: '1.0.0'
  };
  const headers = {
    'Content-Type': 'application/json',
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Methods': 'GET, OPTIONS',
    'Access-Control-Allow-Headers': 'Content-Type'
  };
  return new Response(JSON.stringify(payload), { status: 200, headers });
}

26
dev.md
View File

@@ -51,7 +51,7 @@ docker build -t linshen/prompt-optimizer:$VERSION .
docker tag linshen/prompt-optimizer:$VERSION linshen/prompt-optimizer:latest
# 运行容器
docker run -d -p 80:80 --restart unless-stopped --name prompt-optimizer linshen/prompt-optimizer
docker run -d -p 80:80 --restart unless-stopped --name prompt-optimizer linshen/prompt-optimizer:$VERSION
# 推送
@@ -136,30 +136,6 @@ pnpm build:web
pnpm build:ext
```
### CI/CD集成
```yaml
# GitHub Actions示例
name: Docker Build and Push
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build and push
uses: docker/build-push-action@v5
with:
push: true
tags: prompt-optimizer:latest
```
### 常用Docker命令
```bash

View File

@@ -464,4 +464,53 @@
- iOS Safari >= 14
- Android Chrome >= 90
## 跨域代理解决方案
为了解决在纯前端应用中调用第三方LLM API时可能遇到的跨域问题我们实现了一个基于Vercel Edge Runtime的代理解决方案。
### 代理架构
1. **API代理**:用于处理普通请求
- 路径:`/api/proxy`
- 功能转发普通HTTP请求处理CORS头
2. **流式代理**:用于处理流式请求
- 路径:`/api/stream`
- 功能:转发流式响应,保持连接直到流结束
### 工作原理
1. 在生产环境中非localhost系统会自动检测是否需要使用代理
2. 所有API请求包括OpenAI都可以使用代理通过模型配置中的`useVercelProxy`选项控制
3. 代理会保留原始请求的所有头信息和请求体
4. 代理会添加必要的CORS头允许浏览器接收响应
### 代码实现
核心代理逻辑位于:
- `/api/proxy.js`:处理普通请求
- `/api/stream.js`:处理流式请求
环境检测逻辑位于:
- `packages/core/src/utils/environment.ts`
### 使用方式
对于开发者来说这个功能是透明的不需要额外配置。系统会自动检测Vercel环境并在模型配置中提供代理选项。
在模型配置界面中当检测到Vercel环境时会显示"使用Vercel代理"的选项。您可以为每个模型单独配置是否启用代理功能。
### 安全考虑
1. 代理仅转发请求,不存储任何数据
2. API密钥仍然由客户端直接发送不经过中间服务器处理
3. 所有请求都通过HTTPS加密传输
### 限制
1. Vercel Edge Functions有30秒的超时限制
2. 有每月带宽和请求数量限制
3. 首次请求可能有冷启动延迟
最后更新:2025-03-02

View File

@@ -24,4 +24,13 @@ export * from './services/model/defaults'
// 导出提示词服务相关
export { PromptService, createPromptService } from './services/prompt/service'
export * from './services/prompt/types'
export * from './services/prompt/errors'
export * from './services/prompt/errors'
// 导出环境工具函数
export {
isBrowser,
isVercel,
getProxyUrl,
checkVercelApiAvailability,
resetVercelStatusCache
} from './utils/environment';

View File

@@ -4,6 +4,7 @@ import { ModelManager, modelManager as defaultModelManager } from '../model/mana
import { APIError, RequestConfigError, ERROR_MESSAGES } from './errors';
import OpenAI from 'openai';
import { GoogleGenerativeAI, GenerativeModel } from '@google/generative-ai';
import { isVercel, getProxyUrl } from '../../utils/environment';
/**
* LLM服务实现 - 基于官方SDK
@@ -60,8 +61,8 @@ export class LLMService implements ILLMService {
/**
* 获取OpenAI实例
*/
private getOpenAIInstance(modelConfig: ModelConfig): OpenAI {
const cacheKey = `${modelConfig.provider}-${modelConfig.defaultModel}`;
private getOpenAIInstance(modelConfig: ModelConfig, isStream: boolean = false): OpenAI {
const cacheKey = `${modelConfig.provider}-${modelConfig.defaultModel}-${isStream ? 'stream' : 'normal'}`;
if (this.openAIInstances.has(cacheKey)) {
return this.openAIInstances.get(cacheKey)!;
@@ -75,11 +76,29 @@ export class LLMService implements ILLMService {
processedBaseURL = processedBaseURL.slice(0, -'/chat/completions'.length);
}
const instance = new OpenAI({
// 使用代理处理跨域问题
let finalBaseURL = processedBaseURL;
// 如果模型配置启用了Vercel代理且当前环境是Vercel则使用代理
// 允许所有API包括OpenAI使用代理
if (modelConfig.useVercelProxy === true && isVercel() && processedBaseURL) {
finalBaseURL = getProxyUrl(processedBaseURL, isStream);
console.log(`使用${isStream ? '流式' : ''}API代理:`, finalBaseURL);
}
// 创建OpenAI实例配置
const config: any = {
apiKey: apiKey,
baseURL: processedBaseURL,
baseURL: finalBaseURL,
dangerouslyAllowBrowser: true
});
};
// 为流式请求添加额外配置
if (isStream) {
config.timeout = 30000; // 添加更短的超时时间,避免长时间等待
config.maxRetries = 2; // 添加更积极的重试策略
}
const instance = new OpenAI(config);
this.openAIInstances.set(cacheKey, instance);
return instance;
@@ -88,27 +107,34 @@ export class LLMService implements ILLMService {
/**
* 获取Gemini实例
*/
private getGeminiModel(modelConfig: ModelConfig, systemInstruction?: string): GenerativeModel {
private getGeminiModel(modelConfig: ModelConfig, systemInstruction?: string, isStream: boolean = false): GenerativeModel {
const apiKey = modelConfig.apiKey || '';
const genAI = new GoogleGenerativeAI(apiKey);
// 创建模型配置
const modelOptions: any = {
model: modelConfig.defaultModel,
generationConfig: {
maxOutputTokens: 2000,
temperature: 0.7,
topK: 40,
topP: 0.95,
}
model: modelConfig.defaultModel
};
// 如果有系统指令,添加到模型配置中
if (systemInstruction) {
modelOptions.systemInstruction = systemInstruction;
}
return genAI.getGenerativeModel(modelOptions);
// 处理baseURL如果以'/v1beta'结尾则去掉
let processedBaseURL = modelConfig.baseURL;
if (processedBaseURL?.endsWith('/v1beta')) {
processedBaseURL = processedBaseURL.slice(0, -'/v1beta'.length);
}
// 使用代理处理跨域问题
let finalBaseURL = processedBaseURL;
// 如果模型配置启用了Vercel代理且当前环境是Vercel则使用代理
// 允许所有API包括OpenAI使用代理
if (modelConfig.useVercelProxy === true && isVercel() && processedBaseURL) {
finalBaseURL = getProxyUrl(processedBaseURL, isStream);
console.log(`使用${isStream ? '流式' : ''}API代理:`, finalBaseURL);
}
return genAI.getGenerativeModel( modelOptions,{"baseUrl": finalBaseURL});
}
/**
@@ -143,7 +169,7 @@ export class LLMService implements ILLMService {
: '';
// 获取带有系统指令的模型实例
const model = this.getGeminiModel(modelConfig, systemInstruction);
const model = this.getGeminiModel(modelConfig, systemInstruction, false);
// 过滤出用户和助手消息
const conversationMessages = messages.filter(msg => msg.role !== 'system');
@@ -281,7 +307,8 @@ export class LLMService implements ILLMService {
modelConfig: ModelConfig,
callbacks: StreamHandlers
): Promise<void> {
const openai = this.getOpenAIInstance(modelConfig);
// 获取流式OpenAI实例
const openai = this.getOpenAIInstance(modelConfig, true);
const formattedMessages = messages.map(msg => ({
role: msg.role,
@@ -289,6 +316,7 @@ export class LLMService implements ILLMService {
}));
try {
console.log('开始创建流式请求...');
const stream = await openai.chat.completions.create({
model: modelConfig.defaultModel,
messages: formattedMessages,
@@ -298,7 +326,7 @@ export class LLMService implements ILLMService {
});
console.log('成功获取到流式响应');
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content || '';
if (content) {
@@ -306,8 +334,8 @@ export class LLMService implements ILLMService {
contentLength: content.length,
content: content.substring(0, 50) + (content.length > 50 ? '...' : '')
});
await callbacks.onToken(content);
callbacks.onToken(content);
// 添加小延迟让UI有时间更新
await new Promise(resolve => setTimeout(resolve, 10));
}
@@ -337,7 +365,7 @@ export class LLMService implements ILLMService {
: '';
// 获取带有系统指令的模型实例
const model = this.getGeminiModel(modelConfig, systemInstruction);
const model = this.getGeminiModel(modelConfig, systemInstruction, true);
// 过滤出用户和助手消息
const conversationMessages = messages.filter(msg => msg.role !== 'system');
@@ -360,10 +388,11 @@ export class LLMService implements ILLMService {
}
try {
console.log('开始创建Gemini流式请求...');
const result = await chat.sendMessageStream(lastUserMessage);
console.log('成功获取到流式响应');
for await (const chunk of result.stream) {
const text = chunk.text();
if (text) {

View File

@@ -44,7 +44,7 @@ export const defaultModels: Record<string, ModelConfig> = {
},
gemini: {
name: 'Gemini',
baseURL: 'https://generativelanguage.googleapis.com/v1beta',
baseURL: 'https://generativelanguage.googleapis.com',
models: ['gemini-2.0-flash'],
defaultModel: 'gemini-2.0-flash',
apiKey: GEMINI_API_KEY,

View File

@@ -16,6 +16,8 @@ export interface ModelConfig {
enabled: boolean;
/** 提供商 */
provider: 'deepseek' | 'gemini' | 'custom' | string;
/** 是否使用Vercel代理 */
useVercelProxy?: boolean;
}
/**

View File

@@ -0,0 +1,102 @@
/**
 * Environment detection utilities.
 *
 * Used to decide whether the app is deployed on Vercel, where the
 * /api/proxy and /api/stream Edge functions are available.
 */
// Memoised result of the Vercel availability probe.
// `checked` stays false until checkVercelApiAvailability() has completed
// once; resetVercelStatusCache() clears it so the probe can run again.
let vercelStatusCache: { available: boolean; checked: boolean } = {
  available: false,
  checked: false
};
/**
 * True when running inside a browser (a global `window` exists);
 * false in Node/SSR contexts.
 */
export const isBrowser = (): boolean => typeof window !== 'undefined';
/**
 * Probe whether the Vercel proxy API is reachable from this deployment.
 *
 * Hits `/api/vercel-status` on the current origin with a short timeout and
 * memoises the outcome in vercelStatusCache; call resetVercelStatusCache()
 * to force a fresh probe. Always resolves to false outside a browser.
 */
export const checkVercelApiAvailability = async (): Promise<boolean> => {
  // Serve the memoised result if the probe already ran.
  if (vercelStatusCache.checked) {
    return vercelStatusCache.available;
  }

  // No window → SSR/Node; the proxy option is irrelevant there.
  if (!isBrowser()) {
    vercelStatusCache = { available: false, checked: true };
    return false;
  }

  try {
    const statusUrl = `${window.location.origin}/api/vercel-status`;
    const response = await fetch(statusUrl, {
      method: 'GET',
      headers: { 'Content-Type': 'application/json' },
      // Short timeout so a non-Vercel deployment fails fast.
      signal: AbortSignal.timeout(3000)
    });

    // Only a 200 whose payload explicitly advertises proxy support counts.
    let available = false;
    if (response.status === 200) {
      const payload = await response.json();
      available = payload.status === 'available' && payload.proxySupport === true;
    }

    vercelStatusCache = { available, checked: true };
    if (available) {
      console.log('[环境检测] 检测到Vercel部署环境代理功能可用');
    } else {
      console.log('[环境检测] 未检测到Vercel部署环境代理功能不可用');
    }
    return available;
  } catch (error) {
    // Network failure, timeout, or malformed JSON → treat as unavailable.
    console.log('[环境检测] Vercel API检测失败:', error);
    vercelStatusCache = { available: false, checked: true };
    return false;
  }
};
/**
 * Forget the cached probe result so the next call to
 * checkVercelApiAvailability() performs a fresh check.
 */
export const resetVercelStatusCache = (): void => {
  vercelStatusCache = { checked: false, available: false };
};
/**
 * Synchronous check for a Vercel deployment, based on the cached probe
 * result. Returns false until checkVercelApiAvailability() has completed —
 * the app must run the async probe first.
 */
export const isVercel = (): boolean => {
  const { checked, available } = vercelStatusCache;
  return checked && available;
};
/**
 * Build the proxied URL for a given upstream base URL.
 *
 * @param baseURL  original upstream base URL (may be undefined → returns '')
 * @param isStream true to target the streaming proxy endpoint
 * @returns absolute URL of /api/proxy or /api/stream carrying the encoded
 *          target, or '' when no baseURL was given
 */
export const getProxyUrl = (baseURL: string | undefined, isStream: boolean = false): string => {
  if (!baseURL) {
    return '';
  }
  const endpoint = isStream ? 'stream' : 'proxy';
  // Anchor on the current origin so the result is an absolute URL.
  const origin = isBrowser() ? window.location.origin : '';
  return `${origin}/api/${endpoint}?targetUrl=${encodeURIComponent(baseURL)}`;
};

View File

@@ -117,6 +117,17 @@
class="w-full px-4 py-2 rounded-xl bg-black/20 border border-purple-600/50 text-white placeholder-white/30 focus:ring-2 focus:ring-purple-500/50 focus:border-transparent transition-all"
placeholder="输入新的 API 密钥(留空则保持不变)"/>
</div>
<div v-if="vercelProxyAvailable" class="flex items-center space-x-2">
<input
:id="`vercel-proxy-${editingModel.key}`"
v-model="editingModel.useVercelProxy"
type="checkbox"
class="w-4 h-4 text-purple-600 bg-black/20 border-purple-600/50 rounded focus:ring-purple-500/50"
/>
<label :for="`vercel-proxy-${editingModel.key}`" class="text-sm font-medium text-white/90">
使用Vercel代理 (解决跨域问题有一定风险请谨慎使用)
</label>
</div>
<div class="flex justify-end space-x-3 pt-4">
<button
type="button"
@@ -174,7 +185,7 @@
<label class="block text-sm font-medium text-white/90 mb-1.5">API 地址</label>
<input v-model="newModel.baseURL" type="url" required
class="w-full px-4 py-2 rounded-xl bg-black/20 border border-purple-600/50 text-white placeholder-white/30 focus:ring-2 focus:ring-purple-500/50 focus:border-transparent transition-all"
placeholder="https://api.example.com/v1/chat/completions"/>
placeholder="https://api.example.com/v1"/>
</div>
<div>
<label class="block text-sm font-medium text-white/90 mb-1.5">默认模型名称</label>
@@ -188,6 +199,17 @@
class="w-full px-4 py-2 rounded-xl bg-black/20 border border-purple-600/50 text-white placeholder-white/30 focus:ring-2 focus:ring-purple-500/50 focus:border-transparent transition-all"
placeholder="输入 API 密钥"/>
</div>
<div v-if="vercelProxyAvailable" class="flex items-center space-x-2">
<input
id="new-model-vercel-proxy"
v-model="newModel.useVercelProxy"
type="checkbox"
class="w-4 h-4 text-purple-600 bg-black/20 border-purple-600/50 rounded focus:ring-purple-500/50"
/>
<label for="new-model-vercel-proxy" class="text-sm font-medium text-white/90">
使用Vercel代理 (解决跨域问题有一定风险请谨慎使用)
</label>
</div>
<div class="flex justify-end space-x-3 pt-4">
<button
type="button"
@@ -226,7 +248,7 @@
<script setup>
import { ref, onMounted, defineEmits } from 'vue';
import { modelManager, createLLMService } from '@prompt-optimizer/core';
import { modelManager, createLLMService, checkVercelApiAvailability, resetVercelStatusCache } from '@prompt-optimizer/core';
import { useToast } from '../composables/useToast';
const toast = useToast();
@@ -241,8 +263,26 @@ const newModel = ref({
name: '',
baseURL: '',
defaultModel: '',
apiKey: ''
apiKey: '',
useVercelProxy: false
});
// 是否支持Vercel代理
const vercelProxyAvailable = ref(false);
// 检测Vercel代理是否可用
const checkVercelProxy = async () => {
try {
// 先重置缓存,确保每次都重新检测
resetVercelStatusCache();
// 使用core中的检测函数
const available = await checkVercelApiAvailability();
vercelProxyAvailable.value = available;
console.log('Vercel代理检测结果:', vercelProxyAvailable.value);
} catch (error) {
console.log('Vercel代理不可用:', error);
vercelProxyAvailable.value = false;
}
};
// 加载所有模型
const loadModels = () => {
@@ -320,7 +360,8 @@ const editModel = (key) => {
name: model.name,
baseURL: model.baseURL,
defaultModel: model.defaultModel,
apiKey: '' // 不显示原有的 API 密钥
apiKey: '', // 不显示原有的 API 密钥
useVercelProxy: model.useVercelProxy
};
isEditing.value = true;
}
@@ -351,7 +392,8 @@ const saveEdit = async () => {
defaultModel: editingModel.value.defaultModel,
apiKey: editingModel.value.apiKey.trim() || originalConfig.apiKey,
enabled: originalConfig.enabled,
provider: originalConfig.provider
provider: originalConfig.provider,
useVercelProxy: editingModel.value.useVercelProxy
}
console.log('新配置:', config);
@@ -364,9 +406,7 @@ const saveEdit = async () => {
isEditing.value = false
editingModel.value = null
// 修改这里,传递被编辑的模型的 key
toast.success('模型配置已更新')
} catch (error) {
console.error('更新模型失败:', error)
@@ -384,7 +424,8 @@ const addCustomModel = async () => {
defaultModel: newModel.value.defaultModel,
apiKey: newModel.value.apiKey,
enabled: true,
provider: 'custom'
provider: 'custom',
useVercelProxy: newModel.value.useVercelProxy
}
modelManager.addModel(newModel.value.key, config)
@@ -397,7 +438,8 @@ const addCustomModel = async () => {
name: '',
baseURL: '',
defaultModel: '',
apiKey: ''
apiKey: '',
useVercelProxy: false
}
toast.success('模型添加成功')
} catch (error) {
@@ -443,6 +485,7 @@ const disableModel = async (key) => {
// 初始化
onMounted(() => {
loadModels();
checkVercelProxy();
});
</script>

View File

@@ -3,6 +3,10 @@
"outputDirectory": "packages/web/dist",
"installCommand": "pwd && if [[ $(pwd) == */packages/extension ]]; then cd ../.. && pnpm install; else pnpm install; fi",
"rewrites": [
{
"source": "/api/:path*",
"destination": "/api/:path*"
},
{
"source": "/(.*)",
"destination": "/index.html"
@@ -12,5 +16,11 @@
"silent": true
},
"env": {
"VERCEL_DEPLOYMENT": "true"
},
"build": {
"env": {
"VERCEL_DEPLOYMENT": "true"
}
}
}