Python Crawler Implementations: Multithreaded, Asynchronous, and Async + Multiprocess

Installing Tornado

To save yourself some work you can simply use the grequests library; the code below uses tornado's asynchronous HTTP client instead. The async part is built on tornado: adapting the example from the official documentation yields a simple asynchronous crawler class. It is worth consulting the latest tornado docs as you read it.

pip install tornado
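If you do go the grequests route instead, the whole thing shrinks to a few lines. A minimal sketch, my own illustration rather than code from this post (grequests is built on gevent, so import it before any requests-based code; failed requests come back as None):

import grequests  # pip install grequests; import before requests-based code

urls = ['http://www.baidu.com?page=%s' % page for page in range(1, 100)]

# build unsent requests lazily, then let grequests.map() run them concurrently
reqs = (grequests.get(url, timeout=5) for url in urls)
for resp in grequests.map(reqs, size=10):  # size caps the concurrency
    if resp is not None:  # None means that request failed
        print(resp.url, resp.status_code)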

Asynchronous crawler

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from datetime import timedelta

from tornado import httpclient, gen, ioloop, queues


class AsySpider(object):
    """A simple class of asynchronous spider."""

    def __init__(self, urls, concurrency=10, **kwargs):
        urls.reverse()
        self.urls = urls
        self.concurrency = concurrency
        self._q = queues.Queue()
        self._fetching = set()
        self._fetched = set()

    def fetch(self, url, **kwargs):
        return httpclient.AsyncHTTPClient().fetch(url, **kwargs)

    def handle_html(self, url, html):
        """handle html page"""
        print(url)

    def handle_response(self, url, response):
        """inherit and rewrite this method"""
        # HTTPResponse and tornado's HTTPError both carry a .code attribute
        code = getattr(response, 'code', None)
        if code == 200:
            self.handle_html(url, response.body)
        elif code == 599:  # connection/timeout error: retry
            self._fetching.remove(url)
            self._q.put(url)

    @gen.coroutine
    def get_page(self, url):
        try:
            response = yield self.fetch(url)
            print('######fetched %s' % url)
        except Exception as e:
            print('Exception: %s %s' % (e, url))
            raise gen.Return(e)
        raise gen.Return(response)

    @gen.coroutine
    def _run(self):
        @gen.coroutine
        def fetch_url():
            current_url = yield self._q.get()
            try:
                if current_url in self._fetching:
                    return
                print('fetching****** %s' % current_url)
                self._fetching.add(current_url)
                response = yield self.get_page(current_url)
                self.handle_response(current_url, response)  # handle response
                self._fetched.add(current_url)
                # top up the queue so every worker always has work
                for i in range(self.concurrency):
                    if self.urls:
                        yield self._q.put(self.urls.pop())
            finally:
                self._q.task_done()

        @gen.coroutine
        def worker():
            while True:
                yield fetch_url()

        self._q.put(self.urls.pop())  # add the first url

        # Start workers, then wait for the work queue to be empty.
        for _ in range(self.concurrency):
            worker()
        yield self._q.join(timeout=timedelta(seconds=300000))
        assert self._fetching == self._fetched

    def run(self):
        io_loop = ioloop.IOLoop.current()
        io_loop.run_sync(self._run)


class MySpider(AsySpider):

    def fetch(self, url, **kwargs):
        """Override the parent fetch() to add cookies, headers, timeout, etc."""
        cookies_str = "PHPSESSID=j1tt66a829idnms56ppb70jri4; pspt=%7B%22id%22%3A%2233153%22%2C%22pswd%22%3A%228835d2c1351d221b4ab016fbf9e8253f%22%2C%22_code%22%3A%22f779dcd011f4e2581c716d1e1b945861%22%7D; key=%E9%87%8D%E5%BA%86%E5%95%84%E6%9C%A8%E9%B8%9F%E7%BD%91%E7%BB%9C%E7%A7%91%E6%8A%80%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8; think_language=zh-cn; SERVERID=a66d7d08fa1c8b2e37dbdc6ffff82d9e|1444973193|1444967835; CNZZDATA1254842228=1433864393-1442810831-%7C1444972138"  # cookie string copied from the browser
        headers = {
            'User-Agent': 'mozilla/5.0 (compatible; baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
            'cookie': cookies_str
        }
        return super(MySpider, self).fetch(  # see the tornado docs for the full parameter list
            url, headers=headers, request_timeout=1
        )

    def handle_html(self, url, html):
        print(url, html)


def main():
    urls = []
    for page in range(1, 100):
        urls.append('http://www.baidu.com?page=%s' % page)
    s = MySpider(urls)
    s.run()


if __name__ == '__main__':
    main()

You can subclass this, feed it some URLs, and override handle_html to process the pages it fetches.

Asynchronous + multiprocess crawler

To push it even further, add a process pool on top, using the multiprocessing module. The throughput is impressive.

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
from datetime import timedelta
from multiprocessing import Pool

from tornado import httpclient, gen, ioloop, queues


class AsySpider(object):
    """A simple class of asynchronous spider."""

    def __init__(self, urls, concurrency):
        urls.reverse()
        self.urls = urls
        self.concurrency = concurrency
        self._q = queues.Queue()
        self._fetching = set()
        self._fetched = set()

    def handle_page(self, url, html):
        filename = url.rsplit('/', 1)[1]
        with open(filename, 'wb') as f:  # response bodies are bytes
            f.write(html)

    @gen.coroutine
    def get_page(self, url):
        try:
            response = yield httpclient.AsyncHTTPClient().fetch(url)
            print('######fetched %s' % url)
        except Exception as e:
            print('Exception: %s %s' % (e, url))
            raise gen.Return(b'')
        raise gen.Return(response.body)

    @gen.coroutine
    def _run(self):
        @gen.coroutine
        def fetch_url():
            current_url = yield self._q.get()
            try:
                if current_url in self._fetching:
                    return
                print('fetching****** %s' % current_url)
                self._fetching.add(current_url)
                html = yield self.get_page(current_url)
                self._fetched.add(current_url)
                self.handle_page(current_url, html)
                for i in range(self.concurrency):
                    if self.urls:
                        yield self._q.put(self.urls.pop())
            finally:
                self._q.task_done()

        @gen.coroutine
        def worker():
            while True:
                yield fetch_url()

        self._q.put(self.urls.pop())  # add the first url

        # Start workers, then wait for the work queue to be empty.
        for _ in range(self.concurrency):
            worker()
        yield self._q.join(timeout=timedelta(seconds=300000))
        assert self._fetching == self._fetched

    def run(self):
        io_loop = ioloop.IOLoop.current()
        io_loop.run_sync(self._run)


def run_spider(beg, end):
    urls = []
    for page in range(beg, end):
        urls.append('http://127.0.0.1/%s.htm' % page)
    s = AsySpider(urls, 10)
    s.run()


def main():
    _st = time.time()
    p = Pool()
    all_num = 73000
    num = 4  # number of cpu cores
    per_num, left = divmod(all_num, num)
    # split [0, all_num) into one contiguous range per process
    s = range(0, all_num, per_num)
    res = []
    for i in range(len(s) - 1):
        res.append((s[i], s[i + 1]))
    res.append((s[len(s) - 1], all_num))
    print(res)
    for i in res:
        p.apply_async(run_spider, args=(i[0], i[1],))
    p.close()
    p.join()
    print(time.time() - _st)


if __name__ == '__main__':
    main()

Multithreaded crawler

A thread-pool implementation.

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import queue
import sys
import threading
import time

import requests


class Worker(threading.Thread):  # handles work requests
    def __init__(self, workQueue, resultQueue, **kwds):
        threading.Thread.__init__(self, **kwds)
        self.daemon = True  # daemon threads exit with the main thread
        self.workQueue = workQueue
        self.resultQueue = resultQueue

    def run(self):
        while 1:
            try:
                func, args, kwds = self.workQueue.get(False)  # grab a task
                res = func(*args, **kwds)
                self.resultQueue.put(res)  # store the result
            except queue.Empty:
                break


class WorkManager:  # creates and manages the thread pool
    def __init__(self, num_of_workers=10):
        self.workQueue = queue.Queue()    # request queue
        self.resultQueue = queue.Queue()  # result queue
        self.workers = []
        self._recruitThreads(num_of_workers)

    def _recruitThreads(self, num_of_workers):
        for i in range(num_of_workers):
            worker = Worker(self.workQueue, self.resultQueue)  # create a worker thread
            self.workers.append(worker)  # add it to the pool

    def start(self):
        for w in self.workers:
            w.start()

    def wait_for_complete(self):
        while len(self.workers):
            worker = self.workers.pop()  # take a thread out of the pool
            worker.join()
            if worker.is_alive() and not self.workQueue.empty():
                self.workers.append(worker)  # put it back into the pool
        print('All jobs were complete.')

    def add_job(self, func, *args, **kwds):
        self.workQueue.put((func, args, kwds))  # add a request to the work queue

    def get_result(self, *args, **kwds):
        return self.resultQueue.get(*args, **kwds)


def download_file(url):
    # print('beg download', url)
    requests.get(url).text  # fetch and discard the body


def main():
    try:
        num_of_threads = int(sys.argv[1])
    except (IndexError, ValueError):
        num_of_threads = 10
    _st = time.time()
    wm = WorkManager(num_of_threads)
    print(num_of_threads)
    urls = ['http://www.baidu.com'] * 1000
    for i in urls:
        wm.add_job(download_file, i)
    wm.start()
    wm.wait_for_complete()
    print(time.time() - _st)


if __name__ == '__main__':
    main()
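On Python 3 the standard library's concurrent.futures.ThreadPoolExecutor covers the same ground with much less code. A minimal sketch of the equivalent download job (my own illustration, not part of the original post):

import time
from concurrent.futures import ThreadPoolExecutor

import requests


def download_file(url):
    try:
        return requests.get(url, timeout=10).text
    except requests.RequestException as exc:
        return 'error: %s' % exc  # one bad URL should not kill the whole run


def main():
    _st = time.time()
    urls = ['http://www.baidu.com'] * 1000
    with ThreadPoolExecutor(max_workers=10) as pool:
        # map() schedules every URL across the workers and yields the results in order
        for _ in pool.map(download_file, urls):
            pass
    print(time.time() - _st)


if __name__ == '__main__':
    main()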

Any of these three approaches is plenty fast, but run this way they put considerable load on the target server, especially a small site, so it is worth showing some restraint; one way to throttle is sketched below.
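With the tornado crawler above you can lower the concurrency and sleep briefly before each fetch. A rough sketch (the PoliteSpider class and its delay parameter are illustrative additions, not part of the original code):

from tornado import gen


class PoliteSpider(AsySpider):
    """An AsySpider that waits a little between requests."""

    def __init__(self, urls, concurrency=2, delay=0.5, **kwargs):
        super(PoliteSpider, self).__init__(urls, concurrency, **kwargs)
        self.delay = delay  # seconds to pause before each request

    @gen.coroutine
    def get_page(self, url):
        yield gen.sleep(self.delay)  # go easy on small sites
        result = yield super(PoliteSpider, self).get_page(url)
        raise gen.Return(result)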
