Usage Examples
Learn how to use GPT-Load in different scenarios through practical code examples and integrate it quickly into your application.
JavaScript / Node.js
Basic usage
// Using the fetch API against GPT-Load's OpenAI-compatible endpoint
const response = await fetch('http://localhost:3000/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer sk-your-api-key'
  },
  body: JSON.stringify({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'user', content: 'Hello, world!' }
    ]
  })
});

const data = await response.json();
console.log(data.choices[0].message.content);
Using the OpenAI SDK
import OpenAI from 'openai';

// Configure GPT-Load as the proxy by overriding the base URL
const openai = new OpenAI({
  apiKey: 'sk-your-api-key',
  baseURL: 'http://localhost:3000/v1'
});

async function chatCompletion() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'user', content: 'Hello, world!' }
    ]
  });
  console.log(completion.choices[0].message.content);
}

chatCompletion();
Python
Using the requests library
import requests

def chat_with_gpt(message):
    url = "http://localhost:3000/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-your-api-key"
    }
    data = {
        "model": "gpt-3.5-turbo",
        "messages": [
            {"role": "user", "content": message}
        ]
    }
    response = requests.post(url, headers=headers, json=data)
    return response.json()["choices"][0]["message"]["content"]

# Example usage
result = chat_with_gpt("Hello, world!")
print(result)
Using the OpenAI Python SDK
from openai import OpenAI

# Configure GPT-Load as the proxy by overriding the base URL
client = OpenAI(
    api_key="sk-your-api-key",
    base_url="http://localhost:3000/v1"
)

def chat_completion(message):
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": message}
        ]
    )
    return completion.choices[0].message.content

# Example usage
result = chat_completion("Hello, world!")
print(result)
cURL (command line)
Chat completion
curl -X POST http://localhost:3000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer sk-your-api-key" \
  -d '{
    "model": "gpt-3.5-turbo",
    "messages": [
      {"role": "user", "content": "Hello, world!"}
    ],
    "max_tokens": 150,
    "temperature": 0.7
  }'
Streaming response
curl -X POST http://localhost:3000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer sk-your-api-key" \
  -d '{
    "model": "gpt-3.5-turbo",
    "messages": [
      {"role": "user", "content": "Write a poem about spring"}
    ],
    "stream": true
  }' \
  --no-buffer
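If you consume the stream from Node.js rather than the command line, the OpenAI SDK client configured above can iterate over the same chunks. This is a minimal sketch, assuming the response follows the standard OpenAI streaming (SSE) chunk format:

import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: 'sk-your-api-key',
  baseURL: 'http://localhost:3000/v1'
});

async function streamChatCompletion(message) {
  // stream: true makes the SDK return an async iterator of incremental chunks
  const stream = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: message }],
    stream: true
  });

  for await (const chunk of stream) {
    // Each chunk carries a delta; print tokens as they arrive
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
  process.stdout.write('\n');
}

streamChatCompletion('Write a poem about spring');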
Frontend integration
React component example
import { useState } from 'react';

function ChatComponent() {
  const [message, setMessage] = useState('');
  const [response, setResponse] = useState('');
  const [loading, setLoading] = useState(false);

  const sendMessage = async () => {
    setLoading(true);
    try {
      // Call your own backend route, which forwards the request to GPT-Load
      const res = await fetch('/api/chat', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ message }),
      });
      const data = await res.json();
      setResponse(data.response);
    } catch (error) {
      console.error('Error:', error);
    } finally {
      setLoading(false);
    }
  };

  return (
    <div className="chat-component">
      <textarea
        value={message}
        onChange={(e) => setMessage(e.target.value)}
        placeholder="Type your message..."
      />
      <button onClick={sendMessage} disabled={loading}>
        {loading ? 'Sending...' : 'Send'}
      </button>
      {response && (
        <div className="response">
          <h3>AI reply:</h3>
          <p>{response}</p>
        </div>
      )}
    </div>
  );
}

export default ChatComponent;
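The component posts to a backend route at /api/chat instead of calling GPT-Load from the browser, which keeps the API key off the client. That route is not part of this guide; below is a minimal sketch of what it might look like with Express (the framework choice and the { message } / { response } payload shape are assumptions of this example):

import express from 'express';

const app = express();
app.use(express.json());

// Hypothetical backend route that forwards the user's message to GPT-Load
// (uses the global fetch available in Node 18+)
app.post('/api/chat', async (req, res) => {
  try {
    const gptRes = await fetch('http://localhost:3000/v1/chat/completions', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer sk-your-api-key' // the key stays on the server
      },
      body: JSON.stringify({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: req.body.message }]
      })
    });
    const data = await gptRes.json();
    res.json({ response: data.choices[0].message.content });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});

app.listen(8080);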
Advanced usage
Error handling and retries
async function robustChatCompletion(message, maxRetries = 3) {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      const response = await fetch('http://localhost:3000/v1/chat/completions', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': 'Bearer sk-your-api-key'
        },
        body: JSON.stringify({
          model: 'gpt-3.5-turbo',
          messages: [{ role: 'user', content: message }]
        })
      });

      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
      }

      const data = await response.json();
      return data.choices[0].message.content;
    } catch (error) {
      console.error(`Attempt ${attempt} failed:`, error);
      if (attempt === maxRetries) {
        throw new Error(`Failed after ${maxRetries} attempts`);
      }
      // Exponential backoff before the next attempt
      await new Promise(resolve =>
        setTimeout(resolve, Math.pow(2, attempt) * 1000)
      );
    }
  }
}
Handling concurrent requests
async function batchProcess(messages) {
  const promises = messages.map(async (message, index) => {
    try {
      const response = await fetch('http://localhost:3000/v1/chat/completions', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': 'Bearer sk-your-api-key'
        },
        body: JSON.stringify({
          model: 'gpt-3.5-turbo',
          messages: [{ role: 'user', content: message }]
        })
      });
      const data = await response.json();
      return {
        index,
        success: true,
        result: data.choices[0].message.content
      };
    } catch (error) {
      return {
        index,
        success: false,
        error: error.message
      };
    }
  });

  // Errors are caught per request above, so every promise settles as fulfilled
  const results = await Promise.allSettled(promises);
  return results.map(result => result.value);
}

// Example usage
const messages = [
  "What is artificial intelligence?",
  "Explain the basic concepts of machine learning",
  "How does deep learning differ from traditional programming?"
];

batchProcess(messages).then(results => {
  results.forEach((result, index) => {
    if (result.success) {
      console.log(`Message ${index + 1} succeeded:`, result.result);
    } else {
      console.error(`Message ${index + 1} failed:`, result.error);
    }
  });
});
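batchProcess fires every request at once. For large batches you may also want to cap concurrency on the client side, independently of the server-side MAX_CONCURRENT_REQUESTS setting mentioned in the tips below. A minimal sketch that works through the batch in fixed-size slices (the slice size is an arbitrary example, not a GPT-Load setting):

async function batchProcessLimited(messages, concurrency = 5) {
  const results = [];
  // Process the batch in slices of `concurrency` requests at a time
  // (the index fields returned by batchProcess are relative to each slice)
  for (let i = 0; i < messages.length; i += concurrency) {
    const slice = messages.slice(i, i + concurrency);
    const sliceResults = await batchProcess(slice);
    results.push(...sliceResults);
  }
  return results;
}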
💡 Configuration tips
• In production, set appropriate timeouts and a retry mechanism (see the sketch after this list)
• When multiple API keys are configured, GPT-Load load-balances across them automatically
• Monitor the /stats endpoint to keep an eye on system performance
• Tune MAX_CONCURRENT_REQUESTS to match your application's workload
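For the timeout recommendation in the first tip, here is a minimal sketch that wraps the fetch-based call from earlier with AbortController (the 30-second value is only an example):

async function chatWithTimeout(message, timeoutMs = 30000) {
  const controller = new AbortController();
  // Abort the request if GPT-Load has not responded within timeoutMs
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const response = await fetch('http://localhost:3000/v1/chat/completions', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer sk-your-api-key'
      },
      body: JSON.stringify({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: message }]
      }),
      signal: controller.signal
    });
    return (await response.json()).choices[0].message.content;
  } finally {
    clearTimeout(timer);
  }
}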