# -*- coding:utf-8 -*-
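# Subdomain title collector: reads a list of domains from a text file,
# prefixes each with random 3-6 character strings to form candidate
# subdomain URLs, fetches them with multiple worker threads, and appends
# the <title> of every reachable page to ./save_title.txt.
# Run the script directly and enter the number of worker threads when prompted.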
import random
import string
import threading
from queue import Queue
from time import sleep

import requests
from lxml import etree
# Append one line of text to the file at savepath.
def savefile(savepath, content):
    with open(savepath, 'a+', encoding='utf8', newline="", errors='ignore') as fp:
        fp.write(content + "\n")
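# Usage example (hypothetical call): savefile('./save_title.txt', 'Example Title')
# appends "Example Title" plus a newline to the file.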
# Generate a random 3-6 character subdomain prefix.
def GetPassword():
    # string.digits covers 0-9, string.ascii_lowercase covers a-z.
    slcNum = [random.choice(string.digits + string.ascii_lowercase)
              for _ in range(random.randint(3, 6))]
    random.shuffle(slcNum)  # shuffle the chosen characters
    return ''.join(slcNum)
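# Example: GetPassword() might return "a3f" or "x0k2mz", a random mix of
# lowercase letters and digits used below as a subdomain prefix.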
lock = threading.RLock()  # guards access to the shared URL queue across threads
def getHTMLText(urlqueue):
    global lock
    while True:
        # Take one domain from the shared queue; the lock makes the
        # empty-check and the get a single atomic step across threads.
        with lock:
            if urlqueue.empty():
                break
            urls_line = urlqueue.get()
        for i in range(1, 101):
            print(" <%s> domain: collection round <%s> " % (urls_line, i))
            url_line = GetPassword()
            url = "http://" + url_line + '.' + urls_line + "/"
            # Retry a few times instead of looping forever on unreachable hosts.
            for _ in range(3):
                try:
                    headers = {
                        'User-Agent': 'Mozilla/5.0 (compatible; Baiduspider-render/2.0; +http://www.baidu.com/search/spider.html)'
                    }
                    r = requests.get(url=url, headers=headers, verify=True, timeout=5)
                    r.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
                    r.encoding = r.apparent_encoding
                    print("Status code: %s" % r.status_code)
                    print("Fetched URL: %s" % r.url)
                    html_title = "".join(etree.HTML(r.text).xpath('//title/text()'))
                    print("Page title: %s" % html_title)
                    title_path = './save_title.txt'
                    savefile(title_path, html_title)
                    print("Done: target %s collected" % url)
                    print("*" * 70)
                    break
                except Exception as ex:
                    print("Request failed:", ex)
                    sleep(1)
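# Expected input: a GB2312-encoded text file with one bare domain per line
# (e.g. "example.com"); each worker builds URLs like
# "http://<random-prefix>.<domain>/" from it.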
if __name__ == '__main__':
    url_queue = Queue()
    seen_queue = set()  # domains already queued, used to skip duplicates
    filepath = "./小辉-top采集.txt"
    with open(filepath, encoding="GB2312") as f:
        txtfile = [line.strip() for line in f.readlines()]
    for line in txtfile:
        if line and line not in seen_queue:
            url_queue.put(line)
            seen_queue.add(line)
    # print(url_queue.qsize())
    threads = []  # keep references to all worker threads
    # threads_num = 50  # fixed thread count, if you prefer not to prompt
    threads_num = int(input("Number of threads: "))
    for ct in range(threads_num):  # create and start the worker threads
        current_thread = threading.Thread(target=getHTMLText, args=(url_queue,))
        current_thread.daemon = True  # setDaemon() is deprecated since Python 3.10
        threads.append(current_thread)
        current_thread.start()
    for t in threads:  # wait for every worker to finish before exiting
        t.join()
    print("Program finished.")