Python-使用Selenium+MongoDB抓取豆瓣网的租房信息

事情的开始是这样的:
最近准备要换房子了,经同事推荐上豆瓣找房,没有中介费.(大写的穷! :( )但是上去一看没有筛选的功能,这要一条一条地看,那得看到什么时候啊,考虑到近视越来越严重,开始撸波代码抓下数据吧...正好把最近学习的东西用上
豆瓣.py (原谅我命名的不规范)

import re
import time
from CLMongo import mongo,mongo_connect_collection
import pymongo
from selenium import webdriver

class DB_request:
    """Crawls Douban group search results for rental posts via Selenium.

    NOTE(review): relies on helpers such as ``request_from_url`` and
    ``re_findall`` that are not visible in this excerpt of the article —
    the class listing is cut off further down.
    """

    # Douban group search endpoint; the query text is appended to it.
    basic_url = 'https://www.douban.com/group/search?cat=1019&q='
    # Pagination offset; mutated via ``self.start += 20`` while paging.
    start = 0
    # True while the posts being scanned are still from today.
    isToday = True

    def __init__(self, search_data):
        """Store the query and immediately kick off the crawl for it."""
        self.search_data = search_data
        self.requestTo_douban(self.basic_url + self.search_data)


    def requestTo_douban(self, request_url=basic_url, page=start):
        """Fetch one page of group search results and drill into each group.

        Recurses once per result page; stops after page 99 because Douban
        redirects to a login wall from page 100 on.
        """
        driver, data = self.request_from_url(request_url)
        driver.quit()

        group_list = self.re_findall(r'result(.*?)class=\"info\"', data)
        if not group_list:
            print("搜索的小组为空?")
        else:
            # Walk every group hit and pull its URL out of the snippet.
            for group_html in group_list:
                self.isToday = True
                url_matches = self.re_findall(r'href=\"(.*?)\"', group_html)
                if not url_matches:
                    print("查找一个小组列表的 URL 失败了!")
                    continue
                self.content_from_agroupURL(url_matches[0] + 'discussion?start=')

        # Paging: advance the shared offset by 20 and recurse for the next page.
        if page < 99:
            self.start += 20
            next_url = 'https://www.douban.com/group/search?start=%s&cat=1019&sort=relevance&q=%s' % (self.start, self.search_data)
            self.requestTo_douban(next_url, self.start)

    def content_from_agroupURL(self, url, start='0'):
        """Page through one group's discussion list while posts are from today.

        NOTE(review): the pattern r'(.*?)' looks truncated — the HTML tags
        around the capture group were probably stripped when the article was
        published. TODO confirm against the original script.
        """
        driver, text = self.request_from_url(url + start)
        driver.quit()

        content_list = self.re_findall(r'(.*?)', text)
        if content_list:
            self.content_handle(content_list)

        # 25 posts per page: keep paging until a non-today post was seen.
        if not self.isToday:
            print('已经是不同日期了', self.isToday)
        else:
            self.content_from_agroupURL(url, str(int(start) + 25))

    def content_handle(self, list):
        """Filter a page of post snippets down to today's and scrape each one.

        Stops (and sets ``self.isToday = False``) as soon as a post from a
        different day is seen, since the listing is newest-first.

        :param list: iterable of raw HTML fragments, one per post.
            NOTE(review): the parameter shadows the builtin ``list``; the
            name is kept only for interface compatibility.
        """
        # Hoisted out of the loop: today's day-of-month, e.g. '05'.
        local_d = time.strftime('%d', time.localtime())
        for content in list:
            content_time = self.re_findall(r'class=\"time\">(.*?)', content)
            if not content_time:
                # Defensive: skip fragments whose timestamp didn't match
                # instead of raising IndexError on content_time[0].
                continue
            day_matches = self.re_findall(r'-(.\d) ', content_time[0])
            if not day_matches:
                continue
            d = day_matches[0]
            # 只抓取当天的数据 — only keep posts from today.
            if d != local_d:
                print("不是最新的消息了已经,我就不抓取咯..", d, local_d)
                self.isToday = False
                break
            content_url = self.re_findall(r'class=\".*?href=\"(.*?)\" title', content)
            content_title = self.re_findall(r'title=\"(.*?)\" ', content)
            if not content_url or not content_title:
                # Defensive: malformed snippet — skip rather than crash.
                continue
            post = {'day': str(d),
                    'title': content_title[0]}
            self.content_detail(content_url[0], post)
    def content_detail(self,detail_url,dict):
        """Fetch a post's detail page and store it if not already seen.

        NOTE(review): this method is cut off in the source article — the
        last line below ends mid-regex, so the rest of the body is missing.
        """
        # `is_repeat` returns True when the URL is NOT yet stored (dedupe).
        if mongo.is_repeat(detail_url):
            driver, detail_data = self.request_from_url(detail_url)
            driver.quit()
            dict['url'] = detail_url
            detail_content = self.re_findall(r'id=\"link-report\"(.*?)

CLMongo.py

import pymongo
import time
import random

class MongoHandle(object):
    """Thin wrapper around the MongoDB collection that stores scraped posts.

    NOTE(review): the methods operate on the module-level
    ``mongo_connect_collection`` handle created below, not on instance
    state, so the class is effectively a namespace of functions.
    """

    def connect_collection(self):
        """Connect to the local MongoDB and return the ``test.DouBan`` collection."""
        print('连接 MongoDB ...')
        client = pymongo.MongoClient(host='127.0.0.1', port=27017)
        # Use the `test` database and return its DouBan collection.
        db = client['test']
        return db.DouBan

    def insert_handle(self, dict):
        """Insert one document, logging (but swallowing) any failure.

        Always sleeps 1-8 s afterwards to throttle the crawler.
        NOTE(review): the parameter shadows the builtin ``dict``; the name
        is kept only for interface compatibility.
        """
        try:
            mongo_connect_collection.insert_one(dict)
        except Exception as e:
            print(e)
        finally:
            # Random delay so the crawler does not hammer the site/DB.
            time.sleep(random.randint(1, 8))

    def remove_time_out(self):
        """Delete every stored post whose `day` is not today's day-of-month."""
        local_d = time.strftime('%d', time.localtime())
        try:
            # `Collection.remove` is deprecated (removed in PyMongo 4.0);
            # `delete_many` is the supported replacement with the same filter.
            mongo_connect_collection.delete_many({"day": {"$ne": local_d}})
        except Exception as e:
            print("出错了 -->", e)

    def is_repeat(self, url):
        """Return True when *url* is NOT yet stored (i.e. safe to scrape).

        NOTE(review): the name reads inverted — it answers "is not a
        repeat" — kept as-is for caller compatibility.
        """
        return mongo_connect_collection.find_one({"url": url}) is None

# Module-level singletons: one handler object and one live collection handle.
# NOTE: connects to MongoDB at import time.
mongo = MongoHandle()
# Reuse `mongo` instead of building a second throwaway MongoHandle instance.
mongo_connect_collection = mongo.connect_collection()

MongoDB.png

关于 MongoDB 的部分操作移步: MongoDB各种查询
如有侵权,请告知删除
刚学爬虫不久,今后有机会更深入学习的时候再改进代码.欢迎大神指正!

你可能感兴趣的:(Python-使用Selenium+MongoDB抓取豆瓣网的租房信息)