Scraping site data with PhantomJS + a Tor proxy
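The script drives PhantomJS through the SOCKS5 listener that Tor Browser opens on localhost:9150, so Tor Browser has to be running before any worker starts. A quick way to confirm that the port is actually accepting connections (a standalone sketch, not part of the script below; tor_proxy_is_up is just a name I made up):

import socket

def tor_proxy_is_up(host="localhost", port=9150, timeout=5):
    """Return True if something is listening on the Tor SOCKS port."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print("Tor SOCKS proxy reachable:", tor_proxy_is_up())

If this prints False, start Tor Browser first; the --proxy=localhost:9150 setting further down points at that listener.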

# -*- coding: utf-8 -*-
"""
Created on Fri Feb 24 13:55:44 2017

@author: wang bei bei
"""
# uses the selenium webdriver API
import csv
import os
import time
import re
import threading
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
## Tor proxy settings, shared stacks, and the thread lock
service_args = [ '--proxy=localhost:9150', '--proxy-type=socks5', ]
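# Note: 9150 is the SOCKS port opened by the Tor Browser bundle; a standalone
# tor daemon listens on 9050 by default, so adjust --proxy if you use that instead.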
stu_id=[]
cet=[]
sex=[]
faculty=[]
crash=0
lock=threading.Lock()
############### Count the rows in the csv file (can be ignored for now.... it is never used) ############
def raw_num(filepath):
    count = 0
    cs = open(filepath, "r", errors="ignore")
    reader = csv.reader(cs)
    for index, line in enumerate(reader):
        count += 1
    cs.close()
    return count
############ Simulated login ############
def post_get(html, driver, name, password):
    driver.delete_all_cookies()
    grade = ""
    driver.get(html)
    # safeguard against the page not being fully loaded (explicit wait, left disabled):
    #element=WebDriverWait(driver,65,5).until(EC.presence_of_element_located((By.XPATH,"//*[@id=\"leftH\"]/div/table/tbody/tr[6]/td/span")))
    driver.find_element_by_xpath("//*[@id=\"zkzh\"]").clear()
    driver.find_element_by_xpath("//*[@id=\"zkzh\"]").send_keys(password)
    driver.find_element_by_xpath("//*[@id=\"xm\"]").clear()
    driver.find_element_by_xpath("//*[@id=\"xm\"]").send_keys(name)
    driver.find_element_by_xpath("//*[@id=\"submitCET\"]").click()
    #element=WebDriverWait(driver,40,2).until(EC.presence_of_element_located((By.XPATH,"//*[@id=\"leftH\"]/div/table/tbody/tr[6]/td/span")))
    cells = driver.find_elements_by_xpath("//*[@id=\"leftH\"]/div/table/tbody/tr[6]/td/span")
    # find_elements (plural) returns an empty list instead of raising when the cell is missing;
    # reading the grade via the xpath text directly would actually be cleaner.......
    for cell in cells:
        grade = cell.text
    return grade
#################### Read data from the csv file ############################
def read_out():
    global name
    global password
    cs = open("C:\\Users\\XXXXX\\2017.csv", "r", errors="ignore")
    reader = csv.reader(cs)
    for num, line in enumerate(reader):
        if num == 0:
            continue    # skip the header row
        name.append(line[6])
        password.append(line[5])
        stu_id.append(line[15])
        cet.append(line[1])
        sex.append(line[7])
        faculty.append(line[12])
        #print("admission ticket no.: " + password[-1] + "  name: " + name[-1])
    cs.close()
################## Write data to the csv file (the scraping call is embedded here) ##########################
def write_in(driver, i):
    global lock
    global csv_file
    global writer
    global error
    grade = 0
    # stop counts attempts: if some unlucky record fails too many submits in a row it gets dropped to error.csv
    stop = 1
    html = "http://www.chsi.com.cn/cet/"
    while len(password) != 0:
        #print("thread %d" % i)
        # pop one record off the shared stacks while holding the lock
        lock.acquire()
        try:
            n = name.pop()
            p = password.pop()
            id = stu_id.pop()
            cet_ = cet.pop()
            sex_ = sex.pop()
            faculty_ = faculty.pop()
        except IndexError:
            break          # another thread emptied the stacks first
        finally:
            lock.release()
        try:
            grade = post_get(html, driver, n, p)
            print("\nthread %d:" % i)
            print("admission ticket no.: " + p + "  name: " + n)
            print(grade)
            print("threads still alive: %d" % threading.active_count())
        except Exception as e:
            print("Exception in grade:")
            print(e)
            # fewer than 6 failures so far: push the record back onto the stacks and retry later
            if stop % 6 != 0:
                stop = stop + 1
                lock.acquire()
                try:
                    name.append(n)
                    password.append(p)
                    stu_id.append(id)
                    cet.append(cet_)
                    sex.append(sex_)
                    faculty.append(faculty_)
                finally:
                    lock.release()
            # 6th failure: give up on this record and log it to error.csv
            else:
                stop = 1
                lock.acquire()
                try:
                    print("\n/****** writing to error.csv")
                    print("thread %d:" % i)
                    print("admission ticket no.: " + p + "  name: " + n + " ******/\n")
                    error.writerow([id, cet_, n, p, sex_, faculty_])
                finally:
                    lock.release()
            continue
        # success: write the result row while holding the lock
        lock.acquire()
        try:
            writer.writerow([id, cet_, n, p, sex_, faculty_, grade])
        except Exception:
            print(">>>>>>> error while writing the result row")
        finally:
            lock.release()
    driver.quit()
########### Main ####################
start_time = time.time()
## storage stacks
threads = []
password = []
name = []
## read the data into the stacks
read_out()
##############
grade = 0
### number of threads
thread_num = 6
### csv file handles etc. ####
#html="http://www.chsi.com.cn/cet/"
error_file=open("C:\\Users\\XXXXXX\\error.csv","w",newline="",errors="ignore")
csv_file=open("C:\\Users\\XXXXXX\\result.csv","w",newline="",errors="ignore")
writer=csv.writer(csv_file)
error=csv.writer(error_file)
writer.writerow("fuck!")
error.writerow("fuck!")
########### Multithreading section ###############
for i in range(thread_num):
    print("第%d个线程入栈:"%i)
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36")
    # override the user agent and route traffic through the Tor proxy
    driver = webdriver.PhantomJS(service_args=service_args, desired_capabilities=dcap)
    driver.implicitly_wait(65)
    t=threading.Thread(target=write_in,args=(driver,i))
    threads.append(t)
# start the threads
for i in range(thread_num):
    print("start第%d个线程"%i)
    threads[i].start()
    #time.sleep(1)
for i in range(thread_num):
    threads[i].join()
#####################################
print("一共花了:")
print(time.clock())
csv_file.close()          
error_file.close()       

By the end it had crashed down to just one thread still running........ I have no idea why.
The error:

(screenshot of the error message: Paste_Image.png)
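A likely reason for worker threads dying one by one: calling release() on a threading.Lock that is not currently held raises RuntimeError, and an uncaught exception ends the thread. Wrapping every acquire in try/finally helps, but the manual lock juggling can be avoided entirely by handing records out through a queue.Queue, which is thread-safe on its own. Below is a minimal sketch under that assumption; scrape_one, rows_from_csv, and worker are made-up names, not part of the script above.

import queue
import threading

def scrape_one(record):
    """Hypothetical stand-in for post_get(): submits one record and returns the grade."""
    raise NotImplementedError

jobs = queue.Queue()          # thread-safe job queue replaces the shared lists + manual Lock
results = []
results_lock = threading.Lock()

def worker(worker_id):
    while True:
        try:
            record = jobs.get_nowait()      # no manual acquire/release needed here
        except queue.Empty:
            return                          # queue drained: the thread exits cleanly
        try:
            grade = scrape_one(record)
            with results_lock:              # 'with' releases the lock even if append raises
                results.append(record + [grade])
        except Exception as e:
            print("worker %d failed on %r: %s" % (worker_id, record, e))
        finally:
            jobs.task_done()

# usage sketch:
# for row in rows_from_csv:
#     jobs.put(row)
# workers = [threading.Thread(target=worker, args=(k,)) for k in range(6)]
# for t in workers: t.start()
# for t in workers: t.join()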
