并发异步优化

for循环优化

原始 Demo(串行处理):

import time

def process_item(item):
    """Double *item* after a simulated 0.1 s blocking delay."""
    time.sleep(0.1)  # stand-in for a time-consuming operation
    return item * 2

start_time = time.time()
items = list(range(10))

# Process every item one after another; a comprehension replaces the
# explicit append loop but the work is still strictly sequential.
results = [process_item(item) for item in items]

print(f"串行处理耗时: {time.time() - start_time}秒")
print("结果:", results)

优化后 Demo(并行处理):

import asyncio
import time

def process_item(item):
    """Double *item* after a simulated 0.1 s CPU-bound delay."""
    time.sleep(0.1)  # stand-in for CPU-intensive work
    return item * 2

async def main():
    """Fan the items out to the default thread pool and gather the results."""
    items = list(range(10))

    # asyncio.to_thread offloads each blocking call to a worker thread;
    # gather awaits them all concurrently and preserves input order.
    return await asyncio.gather(
        *(asyncio.to_thread(process_item, item) for item in items)
    )

start_time = time.time()
results = asyncio.run(main())

print(f"协程 + 线程池处理耗时: {time.time() - start_time}秒")
print("结果:", results)

requests优化

原始 Demo(串行请求):

import time
import requests

def fetch(url):
    """Download *url* synchronously and return its first 100 characters."""
    return requests.get(url).text[:100]

start_time = time.time()
urls = [
    "https://www.example.com",
    "https://www.github.com",
    "https://www.python.org"
] * 3  # three copies -> nine requests in total

# Each request blocks until the previous one finishes.
results = [fetch(url) for url in urls]

print(f"串行请求耗时: {time.time() - start_time}秒")
print(f"结果数量: {len(results)}")

优化后 Demo(异步请求):

import time
import asyncio
import aiohttp

async def fetch(url, session):
    """Fetch *url* through the shared *session*; return the first 100 chars."""
    async with session.get(url) as response:
        body = await response.text()
    return body[:100]

async def main():
    """Issue all requests concurrently over a single ClientSession."""
    urls = [
        "https://www.example.com",
        "https://www.github.com",
        "https://www.python.org"
    ] * 3  # three copies -> nine requests in total

    async with aiohttp.ClientSession() as session:
        # gather schedules every fetch at once and awaits them together.
        return await asyncio.gather(*(fetch(url, session) for url in urls))

start_time = time.time()
results = asyncio.run(main())
print(f"异步请求耗时: {time.time() - start_time}秒")
print(f"结果数量: {len(results)}")

io文件优化

原始 Demo(同步 IO):

import time

def process_file(filename):
    """Read *filename*, upper-case its contents, write them to
    ``output_<filename>`` and return the number of characters written.
    """
    with open(filename, 'r') as f:
        data = f.read()
    # Simulate processing the data.
    processed_data = data.upper()
    # BUG FIX: the output path previously used a literal "(unknown)"
    # placeholder instead of interpolating the input name, so every
    # input file clobbered the same output file.
    with open(f"output_{filename}", 'w') as f:
        f.write(processed_data)
    return len(processed_data)

start_time = time.time()
filenames = [f"test_{i}.txt" for i in range(10)]

# Create the test fixtures first.
for filename in filenames:
    with open(filename, 'w') as f:
        f.write("Hello World\n" * 100000)  # large file

results = []
for filename in filenames:
    results.append(process_file(filename))

print(f"同步IO处理耗时: {time.time() - start_time}秒")
print("处理的字符数:", sum(results))

优化后 Demo(异步 IO):

import time
import asyncio
import aiofiles

async def process_file(filename):
    """Asynchronously read *filename*, upper-case its contents, write them
    to ``output_<filename>`` and return the number of characters written.
    """
    async with aiofiles.open(filename, 'r') as f:
        data = await f.read()
    # Simulate processing the data.
    processed_data = data.upper()
    # BUG FIX: the output path previously used a literal "(unknown)"
    # placeholder instead of interpolating the input name, so every
    # input file clobbered the same output file.
    async with aiofiles.open(f"output_{filename}", 'w') as f:
        await f.write(processed_data)
    return len(processed_data)

async def main():
    """Create the fixture files, then process them all concurrently."""
    filenames = [f"test_{i}.txt" for i in range(10)]

    # Create the test fixtures synchronously up front (fixture setup is
    # not part of the measured concurrent work).
    for filename in filenames:
        with open(filename, 'w') as f:
            f.write("Hello World\n" * 100000)  # large file

    tasks = [process_file(filename) for filename in filenames]
    results = await asyncio.gather(*tasks)
    return results

start_time = time.time()
results = asyncio.run(main())
print(f"异步IO处理耗时: {time.time() - start_time}秒")
print("处理的字符数:", sum(results))

你可能感兴趣的:(python,开发语言)