Python Web Scraping (2): Improving Crawler Efficiency

Table of Contents

  • 1 Problem Statement
  • 2 Methods
    • 2.1 Multithreading: threading
    • 2.2 Multiprocessing: multiprocessing
    • 2.3 Coroutines: asyncio
  • 3 Practical Example
    • 3.1 Complete Code
    • 3.2 Results

References
Ⅰ Concurrent Execution
Ⅱ How to optimize the speed of a Python crawler?

1 Problem Statement

When scraping a larger amount of data, how can we keep the run time from growing too long? The four approaches in common use today are multiprocessing, multithreading, coroutines, and hybrid combinations of these.

2 Methods

2.1 Multithreading: threading

See the official threading documentation for details. A minimal sketch of the idea follows below.
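For orientation, here is a minimal sketch (not taken from the article's code) of fetching several pages concurrently with a thread pool; fetch_page and the URL list are placeholder names used only for illustration.

import requests
from concurrent.futures import ThreadPoolExecutor

def fetch_page(url):
    # Network-bound work waits on I/O most of the time, so threads overlap well here
    response = requests.get(url, timeout=5)
    return url, len(response.text)

if __name__ == '__main__':
    urls = ['http://www.bitbid.cn/'] * 5  # placeholder URL list
    with ThreadPoolExecutor(max_workers=4) as executor:
        for url, size in executor.map(fetch_page, urls):
            print(url, size)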

2.2 Multiprocessing: multiprocessing

See the official multiprocessing documentation for details. A minimal sketch of the idea follows below.
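Likewise, a minimal multiprocessing sketch (again with a placeholder fetch_page and URL list). The if __name__ == '__main__' guard is required so that child processes can safely re-import the module, especially on Windows.

import requests
from multiprocessing import Pool, cpu_count

def fetch_page(url):
    # Each call runs in its own worker process
    response = requests.get(url, timeout=5)
    return url, len(response.text)

if __name__ == '__main__':
    urls = ['http://www.bitbid.cn/'] * 5  # placeholder URL list
    with Pool(processes=min(4, cpu_count())) as pool:
        for url, size in pool.map(fetch_page, urls):
            print(url, size)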

2.3 Coroutines: asyncio

See the official asyncio documentation for details. A minimal sketch of the idea follows below.
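Finally, a minimal coroutine sketch using asyncio together with aiohttp (placeholder fetch_page and URL list). The blocking requests library is replaced by aiohttp so the event loop is never blocked while waiting on the network.

import asyncio
import aiohttp

async def fetch_page(session, url):
    # Await the response so other coroutines can run while this one waits
    async with session.get(url) as response:
        text = await response.text()
        return url, len(text)

async def main(urls):
    async with aiohttp.ClientSession() as session:
        tasks = [fetch_page(session, url) for url in urls]
        for url, size in await asyncio.gather(*tasks):
            print(url, size)

if __name__ == '__main__':
    urls = ['http://www.bitbid.cn/'] * 5  # placeholder URL list
    asyncio.run(main(urls))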

3 Practical Example

3.1 Complete Code

The example below scrapes tender announcements from the 比比电子招标 (bitbid.cn) tender-information platform; the fields collected are the full page text, project name, link URL, and publish date.

import requests
from lxml import etree
from bs4 import BeautifulSoup

import re
import time
import csv

import multiprocessing

import aiohttp
import asyncio


header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/81.0.4044.113 Safari/537.36'}

# Create the output file and write the CSV header row
with open('data.csv', 'w', newline='', encoding='utf-8') as f:
    csvwriter = csv.writer(f, dialect='excel')
    csvwriter.writerow(['detail', 'name', 'url', 'publishDate'])
# --------------- Collect all detail-page URLs on one listing page --------------- #
def get_link_url(link_url):
    time.sleep(3)
    response = requests.get(link_url, headers=header, timeout=5)
    # print(response.text)
    # Assumed pattern: the original HTML tags in this regex were lost when the post was
    # extracted; '<table[^>]*>(.*?)</table>' is a reconstruction that grabs the listing table.
    table = re.findall(r'<table[^>]*>(.*?)</table>', response.text, re.S | re.M)[0]
    urls = re.findall(r'(?<=href=").*?(?=")', table, re.S | re.M)
    # print(urls)
    return urls


# --------------- Fetch the details behind each link --------------- #
def get_link_info(url):
    time.sleep(3)
    with open('data.csv', 'a', newline='', encoding='utf-8') as f:
        response = requests.get(url, headers=header, timeout=5)
        soup = BeautifulSoup(response.text, 'lxml')  # parse the page into a soup object
        content = etree.HTML(response.text)          # parse the page into an xpath object
        html = soup.get_text()                       # plain text of the whole page
        # --------- strip whitespace from the string -------- #
        # detail = html.replace(' ', '')        # cannot also handle newlines etc.
        # detail = ''.join(html.split())        # practical
        detail = re.sub(r'[\r\n\s]', '', html)  # remove carriage returns, newlines and whitespace (spaces, tabs, form feeds, ...)
        # ============================= #
        # Sample heading on the page (inside an <h3> tag):
        # 霍州市2020年“四好农村路”窄路基路面拓宽改造工程施工招标公告
        # ============================= #
        # ---------- xpath ---------- #
        # name = content.xpath('//h3/text()')[0]
        # publishDate = content.xpath('//p[contains(@class,"sum")]/text()')[0]
        # ---------- BeautifulSoup ---------- #
        # name = soup.select('h3')[0].get_text()
        # publishDate = soup.select('.sum')[0].get_text()
        # ---------- re ---------- #
        # Assumed pattern: the lookaround tags were lost in extraction; '<h3>'/'</h3>' are reconstructions.
        name = re.findall(r'(?<=<h3>).*?(?=</h3>)', response.text, re.S | re.M)[0]
        name = ''.join(re.split(r'\s', name))
        # Assumed pattern: the original lookahead text was lost in extraction; grab the non-space run after the label instead.
        publishDate = re.findall(r'(?<=发布时间:)\S+', html, re.S | re.M)[0]
        f.write('{},{},{},{}\n'.format(detail, name, url, publishDate))


async def get_asy_link_info(url):
    await asyncio.sleep(3)  # time.sleep() would block the event loop
    # response = await requests.get(url, headers=header, timeout=5)  # requests may fail when paired with asyncio, so aiohttp is used instead
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=header) as response:
            text = await response.text()
    with open('data.csv', 'a', newline='', encoding='utf-8') as f:
        soup = BeautifulSoup(text, 'lxml')  # parse the page into a soup object
        # content = etree.HTML(text)        # parse the page into an xpath object
        html = soup.get_text()              # plain text of the whole page
        detail = re.sub(r'\s', '', html)
        # ============================= #
        # Sample heading on the page (inside an <h3> tag):
        # 霍州市2020年“四好农村路”窄路基路面拓宽改造工程施工招标公告
        # ============================= #
        # ---------- xpath ---------- #
        # name = content.xpath('//h3/text()')[0]
        # publishDate = content.xpath('//p[contains(@class,"sum")]/text()')[0]
        # ---------- BeautifulSoup ---------- #
        # name = soup.select('h3')[0].get_text()
        # publishDate = soup.select('.sum')[0].get_text()
        # ---------- re ---------- #
        name = re.findall(r'(?<=<h3>).*?(?=</h3>)', text, re.S | re.M)[0]
        name = ''.join(re.split(r'\s', name))
        publishDate = re.findall(r'(?<=发布时间:)\S+', html, re.S | re.M)[0]
        f.write('{},{},{},{}\n'.format(detail, name, url, publishDate))


if __name__ == '__main__':
    # List comprehension producing the URLs of listing pages 1-5
    link_urls = ['http://www.bitbid.cn/ggWeb!zhaobiaogg.action?gongShiType=1&currentPage={}'
                 '&ggName=&type=&startDate=&endDate=&shengID=0'.format(i) for i in range(1, 6)]
    url_list = []
    for link_url in link_urls:
        urls = get_link_url(link_url)
        url_list = url_list + urls  # collect the URLs of pages 1-5 into a single list
    print(url_list)

    # ********************** 1 single process ************************** #
    t11 = time.time()  # single-process start time
    for url in url_list:
        get_link_info(url)
    t12 = time.time()  # single-process end time
    print('single process', t12 - t11)

    # ********************** 2 multiprocessing ************************** #
    # ------------ process pool ---------- #
    t21 = time.time()  # process-pool start time
    pool = multiprocessing.Pool(processes=4)  # upper bound: multiprocessing.cpu_count()
    # for url in url_list:
    #     pool.apply_async(get_link_info, args=(url,))  # non-blocking submission
    pool.map(get_link_info, url_list)
    t22 = time.time()  # process-pool end time
    pool.close()
    pool.join()
    print('process pool', t22 - t21)

    # ------------ child processes ---------- #
    t31 = time.time()  # child-process start time
    for url in url_list:
        p = multiprocessing.Process(target=get_link_info, args=(url,))
        p.start()
        p.join()  # joining inside the loop makes the processes run one after another
    t32 = time.time()  # child-process end time
    print('child processes', t32 - t31)

    # ********************** 3 coroutines ************************** #
    t51 = time.time()  # coroutine start time
    loop = asyncio.get_event_loop()  # obtain an event loop
    tasks = [get_asy_link_info(url) for url in url_list]
    loop.run_until_complete(asyncio.wait(tasks))  # run until all tasks complete
    loop.close()  # close the event loop
    t52 = time.time()  # coroutine end time
    print('coroutines', t52 - t51)

3.2 Results

single process 237.6622188091278
process pool 65.96817064285278
child processes 348.5716996192932
coroutines 235.63298511505127

The child-process run is slower than the single process because p.join() is called inside the loop, so each process must finish before the next one starts; only the process pool actually runs the downloads in parallel.
