With the rapid advance of AI technology, large language model (LLM) application development has become a major trend. This article takes an in-depth look at eight of the most influential AI development frameworks and platforms, covering their core features, technical strengths, and typical use cases, with hands-on code examples showing how to build AI applications quickly. Whether you are an experienced developer or just starting to explore AI application development, you should find useful reference material here.
# LangChain example: a conversational RAG pipeline over local PDF documents
from langchain_community.document_loaders import DirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
import os

# Set the OpenAI API key
os.environ["OPENAI_API_KEY"] = "your-api-key"

# 1. Load documents (DirectoryLoader parses PDFs via unstructured by default)
loader = DirectoryLoader('./documents', glob="**/*.pdf")
documents = loader.load()

# 2. Split documents into overlapping chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
)
splits = text_splitter.split_documents(documents)

# 3. Create the vector store
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(splits, embeddings)

# 4. Create the conversational retrieval chain
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")
qa_chain = ConversationalRetrievalChain.from_llm(
    llm,
    vectorstore.as_retriever(),
    return_source_documents=True,
)

# 5. Interactive Q&A
chat_history = []

def ask_question(question):
    result = qa_chain.invoke({"question": question, "chat_history": chat_history})
    chat_history.append((question, result["answer"]))
    return result["answer"]

# Usage example
question = "What is the company's annual-leave policy?"
answer = ask_question(question)
print(f"Question: {question}\nAnswer: {answer}")
# CrewAI example: a sequential multi-agent pipeline for content creation
from crewai import Agent, Task, Crew, Process, LLM

# 1. Create the agents
writer = Agent(
    role='Writer',
    goal='Produce high-quality technical articles',
    backstory='A professional technical writer who excels at making complex concepts simple',
    llm=LLM(model="gpt-3.5-turbo", temperature=0.7)
)
editor = Agent(
    role='Editor',
    goal='Ensure the quality and accuracy of the article',
    backstory='A senior technical editor focused on logical structure and readability',
    llm=LLM(model="gpt-3.5-turbo", temperature=0.3)
)
reviewer = Agent(
    role='Reviewer',
    goal='Review the article for technical accuracy',
    backstory='A domain expert responsible for ensuring the technical content is correct',
    llm=LLM(model="gpt-3.5-turbo", temperature=0.2)
)

# 2. Create the tasks (recent CrewAI versions require expected_output)
write_task = Task(
    description='Write a technical article about the LangChain framework, including basic concepts and sample code',
    expected_output='A complete draft article with code examples',
    agent=writer
)
edit_task = Task(
    description='Edit and improve the structure and wording of the article',
    expected_output='An edited version of the article',
    agent=editor
)
review_task = Task(
    description='Review the article for technical accuracy and suggest corrections',
    expected_output='A reviewed article with correction notes',
    agent=reviewer
)

# 3. Build and run the workflow
crew = Crew(
    agents=[writer, editor, reviewer],
    tasks=[write_task, edit_task, review_task],
    process=Process.sequential
)
result = crew.kickoff()
print(result)
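With Process.sequential each task's output flows to the next implicitly, but you can also wire dependencies explicitly. A small sketch, assuming the Task context parameter available in recent CrewAI releases:

# Give downstream tasks explicit access to earlier task outputs
edit_task = Task(
    description='Edit and improve the structure and wording of the article',
    expected_output='An edited version of the article',
    agent=editor,
    context=[write_task],            # the editor sees the writer's draft
)
review_task = Task(
    description='Review the article for technical accuracy and suggest corrections',
    expected_output='A reviewed article with correction notes',
    agent=reviewer,
    context=[write_task, edit_task], # the reviewer sees both earlier outputs
)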
# AutoChain example (schematic): the module paths and method names below are
# illustrative pseudo-code; check the AutoChain documentation for the exact API
from autochain import AutoChain
from autochain.llm import OpenAIGPT
from autochain.memory import ConversationMemory
from autochain.schema import Task

# 1. Configure AutoChain
chain = AutoChain(
    llm=OpenAIGPT(api_key="your-api-key"),
    memory=ConversationMemory()
)

# 2. Define a ticket-processing task
class TicketProcessor(Task):
    def __init__(self, llm):
        self.llm = llm

    def process(self, ticket_content: str) -> dict:
        # Analyze the ticket content
        analysis = self.llm.analyze(ticket_content)
        # Determine the ticket type and priority
        ticket_type = analysis.get("type")
        priority = analysis.get("priority")
        # Generate a reply
        response = self.llm.generate_response(
            context=analysis,
            template="customer_service_response"
        )
        return {
            "type": ticket_type,
            "priority": priority,
            "response": response,
            "status": "processed"
        }

# 3. Process a sample ticket
processor = TicketProcessor(llm=chain.llm)
ticket = """
Customer report: I cannot log in to the system; it says my password is wrong,
but I am sure the password is correct. This is blocking my work,
please handle it as soon as possible!
"""
result = processor.process(ticket)
print(f"Ticket type: {result['type']}")
print(f"Priority: {result['priority']}")
print(f"Reply: {result['response']}")
# LlamaIndex example: build, persist, and reload a vector index over local files
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core.node_parser import SentenceSplitter
import os

# 1. Configure the environment
os.environ['OPENAI_API_KEY'] = 'your-api-key'

# 2. Load documents from a directory, recursively
documents = SimpleDirectoryReader(
    input_dir='./data',
    recursive=True
).load_data()

# 3. Parse documents into nodes
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(documents)

# 4. Build the index
index = VectorStoreIndex(nodes)

# 5. Persist it to disk
index.storage_context.persist(persist_dir="./storage")

# 6. Create a query engine
query_engine = index.as_query_engine()

# 7. Ask a question
response = query_engine.query(
    "What are the main features of our company's products?"
)
print(response)

# 8. Reload the existing index later
storage_context = StorageContext.from_defaults(
    persist_dir="./storage"
)
loaded_index = load_index_from_storage(storage_context)
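The query engine also accepts retrieval parameters; a small sketch (the parameter values are arbitrary examples):

# Retrieve more context and synthesize a compact answer
query_engine = loaded_index.as_query_engine(
    similarity_top_k=5,       # number of nodes to retrieve
    response_mode="compact",  # how retrieved text is packed into the prompt
)
print(query_engine.query("What are the main features of our company's products?"))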
# RAGFlow example (schematic): the classes below sketch RAGFlow's
# retrieval-augmented pipeline; consult the RAGFlow SDK docs for the exact API
from ragflow import RAGFlow, DataSource, Retriever, Generator
from ragflow.embeddings import OpenAIEmbeddings
from ragflow.llm import OpenAI

# 1. Initialize RAGFlow
flow = RAGFlow(
    embeddings=OpenAIEmbeddings(),
    llm=OpenAI(model="gpt-3.5-turbo")
)

# 2. Configure data sources
datasource = DataSource()
datasource.add_files("./documents")
datasource.add_database(
    connection_string="postgresql://user:pass@localhost/db"
)

# 3. Configure the retriever
retriever = Retriever(
    datasource,
    search_type="hybrid",  # combine keyword and vector search
    top_k=3
)

# 4. Configure the generator
generator = Generator(
    template="Q: {question}\nContext: {context}\nA: "
)

# 5. Assemble the pipeline
flow.set_retriever(retriever)
flow.set_generator(generator)

# 6. Ask a question
question = "Which programming languages do our products support?"
answer = flow.run(question)
print(f"Question: {question}")
print(f"Answer: {answer}")
# Dify example: a customer-service bot built on Dify's HTTP API
import requests

class CustomerServiceBot:
    def __init__(self, api_key):
        self.api_key = api_key
        self.api_endpoint = "https://api.dify.ai/v1"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        self.conversation_id = None

    def send_message(self, message, user_id="demo-user"):
        """Send a message and get the reply via Dify's chat-messages API."""
        url = f"{self.api_endpoint}/chat-messages"
        payload = {
            "inputs": {},
            "query": message,
            "response_mode": "blocking",
            "user": user_id
        }
        if self.conversation_id:
            payload["conversation_id"] = self.conversation_id
        response = requests.post(
            url,
            headers=self.headers,
            json=payload
        )
        result = response.json()
        # Save the conversation ID so follow-up messages keep context
        if not self.conversation_id and "conversation_id" in result:
            self.conversation_id = result["conversation_id"]
        return result

    def get_knowledge_base_answer(self, question):
        """Query the knowledge base. The route below is illustrative;
        see Dify's Knowledge API docs for the exact dataset endpoints."""
        url = f"{self.api_endpoint}/knowledge-base/query"
        payload = {
            "query": question
        }
        response = requests.post(
            url,
            headers=self.headers,
            json=payload
        )
        return response.json()

def main():
    # Initialize the customer-service bot
    bot = CustomerServiceBot(api_key="your-api-key")
    # Sample question
    question = "How do I reset my password?"
    # 1. Try the knowledge base first
    kb_response = bot.get_knowledge_base_answer(question)
    if kb_response.get("answer"):
        print(f"Knowledge-base answer: {kb_response['answer']}")
    else:
        # 2. Fall back to chat mode if the knowledge base has no answer
        chat_response = bot.send_message(question)
        print(f"AI answer: {chat_response.get('answer')}")

if __name__ == "__main__":
    main()
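Dify also supports response_mode "streaming", which returns server-sent events so the answer can be shown as it is generated. A minimal sketch of consuming the stream with requests; the event format follows Dify's documented SSE protocol, but verify the field names against the current docs:

import json

def stream_message(bot, message, user_id="demo-user"):
    # Stream the answer chunk by chunk via server-sent events
    payload = {"inputs": {}, "query": message,
               "response_mode": "streaming", "user": user_id}
    with requests.post(f"{bot.api_endpoint}/chat-messages",
                       headers=bot.headers, json=payload, stream=True) as r:
        for line in r.iter_lines():
            if line.startswith(b"data: "):
                event = json.loads(line[len(b"data: "):])
                if event.get("event") == "message":
                    print(event.get("answer", ""), end="", flush=True)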
# Flowise example: calling a visual flow through the Prediction API
import requests
from typing import Dict, Any

class FlowiseClient:
    def __init__(self, api_key: str, endpoint: str):
        self.api_key = api_key
        self.endpoint = endpoint
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def run_flow(self, flow_id: str, question: str) -> Dict[str, Any]:
        """
        Call a predefined Flowise flow via the Prediction API.

        Args:
            flow_id: the Flowise flow (chatflow) ID
            question: the user input to send

        Returns:
            Dict: the flow's execution result
        """
        url = f"{self.endpoint}/api/v1/prediction/{flow_id}"
        try:
            response = requests.post(
                url,
                headers=self.headers,
                # Flowise's Prediction API expects a "question" field
                json={"question": question}
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"Error calling Flowise API: {e}")
            return {"error": str(e)}

class KnowledgeBaseQA:
    def __init__(self, api_key: str, flow_id: str):
        self.client = FlowiseClient(
            api_key=api_key,
            endpoint="your-flowise-endpoint"
        )
        self.flow_id = flow_id

    def ask_question(self, question: str) -> str:
        """
        Ask the knowledge base a question.

        Args:
            question: the question text

        Returns:
            str: the answer
        """
        result = self.client.run_flow(self.flow_id, question)
        if "error" in result:
            return f"Error: {result['error']}"
        # The Prediction API returns the answer in the "text" field
        return result.get("text", "No answer found")

def main():
    # Initialize the knowledge-base Q&A system
    qa_system = KnowledgeBaseQA(
        api_key="your-api-key",
        flow_id="your-flow-id"
    )
    # Sample questions
    questions = [
        "What is the company's tech stack?",
        "How do I request annual leave?",
        "What is the company's history?"
    ]
    # Batch-test the questions
    for question in questions:
        print(f"\nQuestion: {question}")
        answer = qa_system.ask_question(question)
        print(f"Answer: {answer}")

if __name__ == "__main__":
    main()
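Flowise flows can also be parameterized per request through an overrideConfig object in the same Prediction API body. Which keys are honored depends on the nodes in your flow; sessionId below is a common one for per-user chat memory, but treat the exact keys as assumptions to verify in your Flowise instance:

# Override flow settings for a single request (keys depend on your flow's nodes)
endpoint = "your-flowise-endpoint"
flow_id = "your-flow-id"
payload = {
    "question": "How do I request annual leave?",
    "overrideConfig": {
        "sessionId": "user-123",  # keep chat memory separate per user
    },
}
response = requests.post(
    f"{endpoint}/api/v1/prediction/{flow_id}",
    headers={"Authorization": "Bearer your-api-key"},
    json=payload,
)
print(response.json().get("text"))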
# Coze example (schematic): the /chat and /plugins routes below are
# illustrative; consult the Coze open-platform docs for the exact API
import requests
from typing import Dict, Any

class CozeBot:
    def __init__(self, api_key: str):
        self.api_key = api_key
        self.api_endpoint = "https://api.coze.cn/v1"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def send_message(self, message: str, user_id: str) -> Dict[str, Any]:
        """
        Send a message to Coze for processing.

        Args:
            message: the user's message
            user_id: the user ID

        Returns:
            Dict: the processing result
        """
        url = f"{self.api_endpoint}/chat"
        payload = {
            "message": message,
            "user_id": user_id,
            "platform": "wechat"
        }
        try:
            response = requests.post(
                url,
                headers=self.headers,
                json=payload
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return {"error": str(e)}

    def get_plugin_response(self, plugin_id: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Invoke a Coze plugin.

        Args:
            plugin_id: the plugin ID
            params: the plugin parameters

        Returns:
            Dict: the plugin's execution result
        """
        url = f"{self.api_endpoint}/plugins/{plugin_id}/invoke"
        try:
            response = requests.post(
                url,
                headers=self.headers,
                json=params
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return {"error": str(e)}

class WeChatHandler:
    def __init__(self, api_key: str):
        self.bot = CozeBot(api_key)

    async def handle_message(self, message: str, user_id: str) -> str:
        """
        Handle an incoming WeChat message.

        Args:
            message: the user's message
            user_id: the user ID

        Returns:
            str: the reply text
        """
        # Let Coze process the message
        response = self.bot.send_message(message, user_id)
        if "error" in response:
            return f"Error: {response['error']}"
        # Check whether a plugin call is needed
        if response.get("needPlugin"):
            plugin_response = self.bot.get_plugin_response(
                response["pluginId"],
                response["pluginParams"]
            )
            if "error" in plugin_response:
                return f"Plugin Error: {plugin_response['error']}"
            return plugin_response.get("reply", "No reply from plugin")
        return response.get("reply", "No response")

def main():
    # Initialize the WeChat message handler
    handler = WeChatHandler(api_key="your-api-key")
    # Simulated incoming WeChat messages
    messages = [
        ("Recommend some new products", "user123"),
        ("Check my order status", "user456"),
        ("How do I contact support?", "user789")
    ]
    # Run the messages through the handler
    import asyncio

    async def test_messages():
        for message, user_id in messages:
            print(f"\nMessage from {user_id}: {message}")
            reply = await handler.handle_message(message, user_id)
            print(f"Reply: {reply}")

    asyncio.run(test_messages())

if __name__ == "__main__":
    main()
When choosing an AI development framework or platform, weigh these key factors:
- Fit with your development requirements
- Compatibility with your existing tech stack
- Project scale and complexity
- Deployment and maintenance cost
As AI technology continues to advance, these frameworks and platforms can be expected to keep evolving.
This article has walked through eight mainstream AI development frameworks and platforms, each with its own strengths and best-fit scenarios. Developers can choose the tool that best matches their project requirements, tech stack, and available resources to begin building AI applications. As these frameworks and platforms mature, AI application development will only become more efficient and convenient.