Scraping images with a Python crawler and BeautifulSoup

The script below walks the gallery index of www.mzitu.com page by page, collects each gallery's link and title, and uses a multiprocessing pool to download every image in every gallery, spoofing the Referer header to get past the image host's anti-hotlinking check.

#coding=utf-8
import requests
from bs4 import BeautifulSoup
import os
from multiprocessing import Pool

# HTTP request headers for page requests
Hostreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://www.mzitu.com'
}
# This Referer defeats the image host's anti-hotlinking check
# (see the note after the listing)
Picreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://i.meizitu.net'
}

all_url = 'http://www.mzitu.com'

# Save location
path = 'D:/mzitu/'

def Download(href, title):
    # Folder name: strip whitespace and drop '?', which is illegal in Windows paths
    folder = path + title.strip().replace('?', '')
    html = requests.get(href, headers=Hostreferer)
    soup = BeautifulSoup(html.text, 'html.parser')
    # Page count of the gallery; a fixed index into the page's <span> tags
    # is fragile (see the note after the listing)
    pic_max = soup.find_all('span')[10].text
    # Skip galleries that are already fully downloaded
    if os.path.exists(folder) and len(os.listdir(folder)) >= int(pic_max):
        print('Already complete, skipping ' + title)
        return 1
    print('Start scraping: ' + title)
    os.makedirs(folder, exist_ok=True)

    for num in range(1, int(pic_max) + 1):
        pic = href + '/' + str(num)
        html = requests.get(pic, headers=Hostreferer)
        mess = BeautifulSoup(html.text, 'html.parser')
        pic_url = mess.find('img', alt=title)
        html = requests.get(pic_url['src'], headers=Picreferer)
        file_name = pic_url['src'].split('/')[-1]
        # Write the image bytes under the gallery folder (no chdir needed)
        with open(os.path.join(folder, file_name), 'wb') as f:
            f.write(html.content)
    print('Finished ' + title)


if __name__ == '__main__':
    start_html = requests.get(all_url, headers=Hostreferer)

    # Find the largest page number of the gallery index
    soup = BeautifulSoup(start_html.text, "html.parser")
    page = soup.find_all('a', class_='page-numbers')
    max_page = page[-2].text  # second-to-last pagination link; the last one is typically "next page"

    same_url = 'http://www.mzitu.com/page/'
    pool = Pool(15)
    for n in range(1, int(max_page) + 1):
        ul = same_url + str(n)
        start_html = requests.get(ul, headers=Hostreferer)
        soup = BeautifulSoup(start_html.text, "html.parser")
        all_a = soup.find('div', class_='postlist').find_all('a', target='_blank')
        for a in all_a:
            title = a.get_text()  # skip anchors with no text, e.g. the image-only links
            if title != '':
                href = a['href']
                pool.apply_async(Download, args=(href, title))
    pool.close()
    pool.join()
    print('All images downloaded')
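
A note on the Picreferer header: the image host rejects requests whose Referer does not point back at its own domain, which is why hotlinked images fail in a browser but the crawler's requests succeed. Below is a minimal sketch to observe this, with a hypothetical image URL (substitute one the crawler actually found); the exact failure mode without a Referer, e.g. a 403 or a placeholder image, depends on the host:

import requests

# Hypothetical image URL for illustration only; substitute a real one
# scraped by the crawler above.
img_url = 'http://i.meizitu.net/2017/01/01a01.jpg'

# Without a Referer, a hotlink-protected host typically refuses the request.
plain = requests.get(img_url)
print(plain.status_code, len(plain.content))

# With a Referer pointing at the image host, the check passes.
spoofed = requests.get(img_url, headers={
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://i.meizitu.net',
})
print(spoofed.status_code, len(spoofed.content))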
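The page-count lookup soup.find_all('span')[10] depends on the exact number of <span> tags in the template and breaks silently when the layout changes. A more defensive sketch, assuming only what the original index already implies, namely that the page numbers appear as digit-only <span> texts:

def get_page_count(soup):
    """Return the largest digit-only <span> text, i.e. the gallery's
    page count, without relying on a fixed tag index."""
    nums = [int(s.text) for s in soup.find_all('span') if s.text.strip().isdigit()]
    return max(nums) if nums else 1

Download could then read pic_max = get_page_count(soup) in place of the indexed lookup.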
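One more caveat: pool.apply_async is fire-and-forget, so an exception inside Download (a timeout, a missing tag) disappears silently. Keeping the AsyncResult objects and calling get() on each surfaces worker errors; a self-contained sketch of the pattern with a toy worker function:

from multiprocessing import Pool

def work(n):
    if n == 3:
        raise ValueError('boom')
    return n * n

if __name__ == '__main__':
    pool = Pool(4)
    results = [pool.apply_async(work, args=(n,)) for n in range(5)]
    pool.close()
    pool.join()
    for r in results:
        try:
            print(r.get())  # re-raises any exception from the worker
        except Exception as exc:
            print('worker failed:', exc)

In the crawler, the same pattern means appending each apply_async return value to a list inside the page loop and checking every result after pool.join().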

 
