大型语言模型(LLM)是OpenHands代理系统的核心驱动力。本笔记将深入探讨LLM API调用的基本原理,以及如何在实践中实现与Claude等先进模型的基础连接模块,为构建AI代理系统奠定基础。
以OpenAI/Anthropic风格API为例:
{
"id": "response-123",
"object": "chat.completion",
"created": 1699933901,
"model": "claude-sonnet-4-20250514",
"content": [
{
"type": "text",
"text": "模型生成的回复内容"
},
{
"type": "tool_call",
"tool_call": { /* 工具调用信息 */ }
}
],
"usage": {
"prompt_tokens": 120,
"completion_tokens": 350,
"total_tokens": 470
}
}
从README_CN.md可知,OpenHands优先支持:
Anthropic Claude Sonnet 4:
anthropic/claude-sonnet-4-20250514
其他支持的模型(完整列表请参阅OpenHands官方文档)。下面开始实现与LLM API通信的基础客户端模块:
// llm-client.js
const axios = require('axios');
const EventEmitter = require('events');
class LLMClient extends EventEmitter {
  /**
   * Thin client for the Anthropic Messages API (`POST /v1/messages`).
   * Emits `token`, `tool`, `done`, `end`, and `error` events while streaming.
   *
   * @param {object} config
   * @param {string} config.apiKey - API key sent via the `x-api-key` header.
   * @param {string} [config.baseURL='https://api.anthropic.com']
   * @param {string} [config.model='claude-sonnet-4-20250514']
   */
  constructor(config) {
    super();
    this.apiKey = config.apiKey;
    this.baseURL = config.baseURL || 'https://api.anthropic.com';
    this.model = config.model || 'claude-sonnet-4-20250514';
    this.defaultParams = {
      temperature: 0.7,
      max_tokens: 2048,
      top_p: 0.95,
    };
  }

  // Common headers for every Messages API request.
  _headers() {
    return {
      'Content-Type': 'application/json',
      'x-api-key': this.apiKey,
      'anthropic-version': '2023-06-01'
    };
  }

  /**
   * Build the request payload. NOTE: the Anthropic Messages API does not
   * accept a message with role "system" inside `messages` — the system
   * prompt must be sent as the top-level `system` field, and `messages`
   * may only contain user/assistant turns.
   *
   * @param {string} prompt - User message content.
   * @param {string} systemPrompt - Top-level system prompt.
   * @param {object} options - Caller overrides (highest precedence).
   * @param {object} [extra] - Endpoint-specific fields (e.g. stream, tools).
   */
  _payload(prompt, systemPrompt, options, extra = {}) {
    return {
      model: this.model,
      system: systemPrompt,
      messages: [
        { role: 'user', content: prompt }
      ],
      ...this.defaultParams,
      ...extra,
      ...options
    };
  }

  /**
   * Standard (non-streaming) API call.
   * @returns {Promise<object>} Parsed response body.
   * @throws Re-throws the axios error after logging the API error payload.
   */
  async complete(prompt, systemPrompt, options = {}) {
    try {
      const response = await axios({
        method: 'POST',
        url: `${this.baseURL}/v1/messages`,
        headers: this._headers(),
        data: this._payload(prompt, systemPrompt, options)
      });
      return response.data;
    } catch (error) {
      console.error('LLM API Error:', error.response?.data || error.message);
      throw error;
    }
  }

  /**
   * Streaming API call (Server-Sent Events).
   * @returns {Promise<LLMClient>} `this`, for chaining event listeners.
   */
  async streamComplete(prompt, systemPrompt, options = {}) {
    try {
      const response = await axios({
        method: 'POST',
        url: `${this.baseURL}/v1/messages`,
        headers: this._headers(),
        data: this._payload(prompt, systemPrompt, options, { stream: true }),
        responseType: 'stream'
      });
      return this._handleStream(response.data);
    } catch (error) {
      console.error('LLM Streaming API Error:', error.response?.data || error.message);
      throw error;
    }
  }

  /**
   * Parse an SSE byte stream and re-emit typed events.
   * Anthropic frames each event as `event: <name>\ndata: <json>\n\n`, so the
   * `data:` line must be located inside each event block (the previous code
   * tested the whole block and therefore never matched).
   */
  _handleStream(stream) {
    let buffer = '';
    stream.on('data', (chunk) => {
      buffer += chunk.toString();
      // Events are separated by a blank line; keep any trailing partial
      // event in the buffer until more bytes arrive.
      const events = buffer.split('\n\n');
      buffer = events.pop();
      for (const rawEvent of events) {
        for (const line of rawEvent.split('\n')) {
          if (!line.startsWith('data: ')) continue;
          const data = line.slice(6);
          if (data === '[DONE]') {
            // OpenAI-compatible endpoints terminate the stream this way.
            this.emit('done');
            continue;
          }
          try {
            const parsed = JSON.parse(data);
            // Anthropic text deltas use delta.type === 'text_delta'
            // (not 'text', which never matched).
            if (parsed.type === 'content_block_delta' && parsed.delta?.type === 'text_delta') {
              this.emit('token', parsed.delta.text);
            } else if (parsed.type === 'content_block_start' && parsed.content_block?.type === 'tool_use') {
              // Tool-use blocks open with a content_block_start event.
              this.emit('tool', parsed.content_block);
            } else if (parsed.type === 'tool_use') {
              // Kept for OpenAI-style payload compatibility.
              this.emit('tool', parsed.tool_use);
            } else if (parsed.type === 'message_stop') {
              // Anthropic's end-of-message marker.
              this.emit('done');
            }
          } catch (e) {
            console.error('Error parsing stream data:', e);
          }
        }
      }
    });
    stream.on('end', () => {
      this.emit('end');
    });
    stream.on('error', (err) => {
      this.emit('error', err);
    });
    return this;
  }

  /**
   * Non-streaming call with tool definitions attached.
   * @param {Array<object>} tools - Tool schemas per the Messages API.
   */
  async completeWithTools(prompt, systemPrompt, tools, options = {}) {
    try {
      const response = await axios({
        method: 'POST',
        url: `${this.baseURL}/v1/messages`,
        headers: this._headers(),
        data: this._payload(prompt, systemPrompt, options, { tools })
      });
      return response.data;
    } catch (error) {
      console.error('LLM Tools API Error:', error.response?.data || error.message);
      throw error;
    }
  }
}
module.exports = LLMClient;
// model-manager.js
const LLMClient = require('./llm-client');
const fs = require('fs').promises;
const path = require('path');
class ModelManager {
  /**
   * Registry of LLM provider configurations plus a cache of LLMClient
   * instances (one per provider/API-key pair).
   *
   * @param {string} [configPath] - Path to the JSON config file;
   *   defaults to <cwd>/config/models.json.
   */
  constructor(configPath) {
    this.configPath = configPath || path.join(process.cwd(), 'config', 'models.json');
    this.clients = {};
    // Always defined, so getAvailableModels()/getClient() are safe to call
    // even if initialize() has not run (or failed) — previously these threw
    // a TypeError on the undefined map.
    this.modelConfigs = {};
  }

  /**
   * Load model configurations from disk; on failure fall back to (and
   * persist) a built-in default Anthropic configuration.
   */
  async initialize() {
    try {
      await this._loadConfiguration();
      console.log(`Loaded ${Object.keys(this.modelConfigs).length} model configurations`);
    } catch (error) {
      console.error('Failed to initialize model manager:', error);
      // Built-in defaults used when no config file exists yet.
      this.modelConfigs = {
        'claude': {
          provider: 'anthropic',
          baseURL: 'https://api.anthropic.com',
          defaultModel: 'claude-sonnet-4-20250514',
          requiresApiKey: true
        },
        // Additional provider configs can be added here.
      };
      // Persist the defaults so the next start loads them from disk.
      await this._saveConfiguration();
    }
  }

  // Read and parse the JSON config file into this.modelConfigs.
  async _loadConfiguration() {
    // recursive:true makes mkdir a no-op when the directory already exists,
    // so no try/catch is needed around it.
    await fs.mkdir(path.dirname(this.configPath), { recursive: true });
    try {
      const configData = await fs.readFile(this.configPath, 'utf8');
      this.modelConfigs = JSON.parse(configData);
    } catch (error) {
      if (error.code === 'ENOENT') {
        // Missing file is expected on first run; initialize() recovers.
        // Keep the original error attached for diagnostics.
        throw new Error('Config file not found', { cause: error });
      }
      throw error;
    }
  }

  // Write this.modelConfigs back to disk (best-effort: logs on failure).
  async _saveConfiguration() {
    try {
      await fs.writeFile(
        this.configPath,
        JSON.stringify(this.modelConfigs, null, 2),
        'utf8'
      );
    } catch (error) {
      console.error('Failed to save model configuration:', error);
    }
  }

  /**
   * Return a (cached) LLMClient for the given provider.
   * @param {string} provider - Key into the model configuration map.
   * @param {string} apiKey - Credential for the provider, if required.
   * @throws {Error} when the provider is unknown or a required key is missing.
   */
  getClient(provider, apiKey) {
    const config = this.modelConfigs[provider];
    if (!config) {
      throw new Error(`Unknown provider: ${provider}`);
    }
    if (config.requiresApiKey && !apiKey) {
      throw new Error(`API key required for provider: ${provider}`);
    }
    // One client per provider/key pair. NOTE: the raw key is embedded in the
    // cache key — keep this map out of logs and serialization.
    const cacheKey = `${provider}_${apiKey}`;
    if (!this.clients[cacheKey]) {
      this.clients[cacheKey] = new LLMClient({
        apiKey,
        baseURL: config.baseURL,
        model: config.defaultModel
      });
    }
    return this.clients[cacheKey];
  }

  // Summarize configured providers (used by the /api/models endpoint).
  getAvailableModels() {
    return Object.entries(this.modelConfigs).map(([id, config]) => ({
      id,
      name: config.name || id,
      defaultModel: config.defaultModel,
      requiresApiKey: config.requiresApiKey
    }));
  }
}
module.exports = ModelManager;
// server.js — HTTP front-end exposing the model manager over a REST/SSE API.
const express = require('express');
const bodyParser = require('body-parser');
const ModelManager = require('./model-manager');
const app = express();
app.use(bodyParser.json());
// Shared model manager instance; its configs are loaded in startServer().
const modelManager = new ModelManager();
// Load model configurations, register the HTTP routes, then start listening.
async function startServer() {
  await modelManager.initialize();

  // GET /api/models — list the configured providers.
  app.get('/api/models', (req, res) => {
    res.json({ models: modelManager.getAvailableModels() });
  });

  // POST /api/chat — one-shot (non-streaming) completion.
  app.post('/api/chat', async (req, res) => {
    try {
      const { provider, apiKey, prompt, systemPrompt, options } = req.body;
      if (!provider || !apiKey || !prompt) {
        return res.status(400).json({ error: 'Missing required parameters' });
      }
      const llm = modelManager.getClient(provider, apiKey);
      const result = await llm.complete(
        prompt,
        systemPrompt || 'You are a helpful AI assistant.',
        options
      );
      res.json(result);
    } catch (err) {
      res.status(500).json({ error: err.message });
    }
  });

  // POST /api/stream — completion relayed to the client as Server-Sent Events.
  app.post('/api/stream', async (req, res) => {
    try {
      const { provider, apiKey, prompt, systemPrompt, options } = req.body;
      if (!provider || !apiKey || !prompt) {
        return res.status(400).json({ error: 'Missing required parameters' });
      }
      // SSE response headers: keep the connection open and uncached.
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      const llm = modelManager.getClient(provider, apiKey);
      const stream = await llm.streamComplete(
        prompt,
        systemPrompt || 'You are a helpful AI assistant.',
        options
      );

      // Forward each upstream event as one SSE data frame.
      const send = (payload) => res.write(`data: ${JSON.stringify(payload)}\n\n`);
      stream.on('token', (token) => send({ type: 'token', content: token }));
      stream.on('tool', (tool) => send({ type: 'tool', content: tool }));
      stream.on('end', () => {
        send({ type: 'end' });
        res.end();
      });
      stream.on('error', (err) => {
        send({ type: 'error', content: err.message });
        res.end();
      });

      // Client disconnected early; resource cleanup could go here.
      req.on('close', () => {});
    } catch (err) {
      res.status(500).json({ error: err.message });
    }
  });

  // Start accepting connections.
  const PORT = process.env.PORT || 3000;
  app.listen(PORT, () => {
    console.log(`LLM API server running on port ${PORT}`);
  });
}
// Boot the server; exit with a nonzero status if startup fails.
startServer().catch((err) => {
  console.error('Failed to start server:', err);
  process.exit(1);
});
创建简单的前端测试页面 (index.html):
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LLM测试界面</title>
<style>
body { font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto; padding: 20px; }
.container { display: flex; flex-direction: column; gap: 10px; }
textarea { height: 100px; padding: 8px; }
button { padding: 8px 16px; background: #0066ff; color: white; border: none; cursor: pointer; }
#response { white-space: pre-wrap; border: 1px solid #ccc; padding: 10px; min-height: 200px; }
.form-group { display: flex; flex-direction: column; gap: 5px; }
</style>
</head>
<body>
<h1>LLM API测试界面</h1>
<div class="container">
<div class="form-group">
<label for="provider">提供商:</label>
<select id="provider">
<option value="claude">Anthropic Claude</option>
</select>
</div>
<div class="form-group">
<label for="apiKey">API密钥:</label>
<input type="password" id="apiKey" placeholder="输入您的API密钥">
</div>
<div class="form-group">
<label for="systemPrompt">系统提示:</label>
<textarea id="systemPrompt">You are a helpful AI assistant with coding expertise.</textarea>
</div>
<div class="form-group">
<label for="prompt">用户提示:</label>
<textarea id="prompt">Write a simple Python function to calculate the fibonacci sequence.</textarea>
</div>
<div class="form-group">
<button id="submitBtn">发送请求</button>
<button id="streamBtn">流式请求</button>
</div>
<div class="form-group">
<label>响应:</label>
<div id="response"></div>
</div>
</div>
<script>
const responseElem = document.getElementById('response');
// One-shot request: POST the form to /api/chat and render the full reply.
document.getElementById('submitBtn').addEventListener('click', async () => {
  const provider = document.getElementById('provider').value;
  const apiKey = document.getElementById('apiKey').value;
  const systemPrompt = document.getElementById('systemPrompt').value;
  const prompt = document.getElementById('prompt').value;
  responseElem.textContent = '正在处理请求...';
  try {
    const response = await fetch('/api/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ provider, apiKey, systemPrompt, prompt })
    });
    const data = await response.json();
    if (response.ok) {
      responseElem.textContent = data.content[0].text;
    } else {
      responseElem.textContent = `错误: ${data.error}`;
    }
  } catch (error) {
    responseElem.textContent = `请求失败: ${error.message}`;
  }
});
// Streaming request: read the SSE body from /api/stream and append tokens
// to the response area as they arrive.
document.getElementById('streamBtn').addEventListener('click', async () => {
  const provider = document.getElementById('provider').value;
  const apiKey = document.getElementById('apiKey').value;
  const systemPrompt = document.getElementById('systemPrompt').value;
  const prompt = document.getElementById('prompt').value;
  responseElem.textContent = '';
  try {
    const response = await fetch('/api/stream', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ provider, apiKey, systemPrompt, prompt })
    });
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      // SSE frames are separated by blank lines; keep partial frames buffered.
      const lines = buffer.split('\n\n');
      buffer = lines.pop();
      for (const line of lines) {
        if (line.startsWith('data: ')) {
          const data = JSON.parse(line.slice(6));
          if (data.type === 'token') {
            responseElem.textContent += data.content;
          } else if (data.type === 'error') {
            responseElem.textContent += `\n错误: ${data.content}`;
          }
        }
      }
    }
  } catch (error) {
    responseElem.textContent += `\n请求失败: ${error.message}`;
  }
});
</script>
</body>
</html>
LLM集成的核心要素:
OpenHands的模型选择:
LLM集成的挑战: