爬虫之BeautifulSoup类

安装:pip install BeautifulSoup4

下表列出了主要的解析器,以及它们的优缺点:看个人习惯选取自己喜欢的解析方式

爬虫之BeautifulSoup类_第1张图片

 1 # 获取html代码
 2 import requests
 3 r = requests.get('http://www.python123.io/ws/demo.html')
 4 demo = r.text
 5 from bs4 import BeautifulSoup
 6 soup = BeautifulSoup(demo,'html.parser')
 7 print(soup.prettify()) #按照标准的缩进格式的结构输出,代码如下
 8 
 9  
10   
<html>
 <head>
  <title>
   This is a python demo page
  </title>
 </head>
 <body>
  <p class="title">
   <b>
    The demo python introduces several python courses.
   </b>
  </p>
  <p class="course">
   Python is a wonderful general-purpose programming language. You can learn Python from novice to professional by tracking the following courses:
   <a class="py1" href="http://www.icourse163.org/course/BIT-268001" id="link1">
    Basic Python
   </a>
   and
   <a class="py2" href="http://www.icourse163.org/course/BIT-1001870001" id="link2">
    Advanced Python
   </a>
   .
  </p>
 </body>
</html>

简单遍历文档树方法的用法

 

# Source of the demo page (the same document requests fetched above).
# The original post lost the HTML tags inside this string during extraction;
# restored here so the navigation examples actually have tags to find.
html_d = """
<html><head><title>This is a python demo page</title></head>
<body>
<p class="title"><b>The demo python introduces several python courses.</b></p>
<p class="course">Python is a wonderful general-purpose programming language. You can learn Python from novice to professional by tracking the following courses:
<a class="py1" href="http://www.icourse163.org/course/BIT-268001" id="link1">Basic Python</a> and
<a class="py2" href="http://www.icourse163.org/course/BIT-1001870001" id="link2">Advanced Python</a>.</p>
</body></html>
"""

from bs4 import BeautifulSoup

soup = BeautifulSoup(html_d, 'html.parser')

# The <title> tag object.
print(soup.title)
# All text content of the document.
print(soup.text)
# Tag name of <title>.
print(soup.title.name)
# Attribute dict of <title> (empty here).
print(soup.title.attrs)
# Children of the first <p> tag (original comment said "head" but the code
# navigates <p>): .contents is a list, .children a one-shot iterator.
print(soup.p.contents)
print(soup.p.children)
# Every <a> tag in the document.
print(soup.find_all('a'))

 

常用解析方法

# Common tree-navigation methods, demonstrated on the same demo document.
html_d = """
<html><head><title>This is a python demo page</title></head>
<body>
<p class="title"><b>The demo python introduces several python courses.</b></p>
<p class="course">Python is a wonderful general-purpose programming language. You can learn Python from novice to professional by tracking the following courses:
<a class="py1" href="http://www.icourse163.org/course/BIT-268001" id="link1">Basic Python</a> and
<a class="py2" href="http://www.icourse163.org/course/BIT-1001870001" id="link2">Advanced Python</a>.</p>
</body></html>
"""

from bs4 import BeautifulSoup

soup = BeautifulSoup(html_d, "lxml")

# Direct children of <p>: .contents is a list.
print(soup.p.contents)
soup.contents[0].name
# .children is an iterator over the same direct children.
print(soup.p.children)
for child in enumerate(soup.p.children):
    print(child)
# .descendants walks every tag and string nested anywhere under <p>.
print(soup.p.descendants)
for item in enumerate(soup.p.descendants):  # fix: original iterated .children here
    print(item)
# .string works only when the tag has exactly ONE string child; with several
# children it returns None (the original comment said this, garbled).
print(soup.title.string)
# .strings yields every string in the document.
for string in soup.strings:
    print(repr(string))
# .stripped_strings skips whitespace-only strings.
for line in soup.stripped_strings:
    print(line)
# Parent of the first <a> tag.
print(soup.a.parent)
# All ancestors of the first <a> tag (a generator).
print(soup.a.parents)
# Sibling navigation (the original printed next_sibling three times even
# though its comments promised previous/next siblings).
print(soup.a.next_sibling)      # next sibling
print(soup.a.previous_sibling)  # previous sibling
print(soup.a.next_sibling)      # next sibling

find_all的用法( name, attrs, recursive, text, **kwargs)

# find_all(name, attrs, recursive, text, **kwargs) usage examples.
import re
from bs4 import BeautifulSoup

soup = BeautifulSoup(html_d, "lxml")
# name: a regex matches tag NAMES (here: any tag whose name contains 'b').
for tag in soup.find_all(re.compile('b')):
    print(tag.name)  # indentation restored: this line belongs inside the loop
# attrs: second positional argument matches the class attribute.
print(soup.find_all('p', 'course'))
# keyword: any attribute can be used as a keyword filter.
print(soup.find_all(id='link1'))
# recursive: pass False to search direct children only.
# print(soup.find_all('a', recursive=False))
# string: filter by text content.
# print(soup.find_all(string=re.compile('python')))

小案例

 

import requests
from bs4 import BeautifulSoup
import bs4
# Fetch a URL and return its decoded page text.
def getHtmlText(url):
    """Return the text of *url*, or "" if the request fails.

    The response encoding is replaced with the apparent (content-sniffed)
    encoding so Chinese pages decode correctly.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # treat HTTP errors (404/500/...) as failures too
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:  # narrowed from a bare `except:`
        return ""
# Extract the ranking rows from the page into ulist.
def fillunivList(ulist, html):
    """Append [rank, name, region, score] rows found in the page's <tbody>."""
    soup = BeautifulSoup(html, "html.parser")
    tbody = soup.find('tbody')
    if tbody is None:  # download failed or the page layout changed
        return
    for tr in tbody.children:
        # Skip the NavigableString whitespace nodes between <tr> tags.
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')
            ulist.append([td.string for td in tds[:4]])
#打印数据结果
def printUnivList(ulist,num):
    # tplt = "{0:^10}\t{1:{3}^10}\t{2:^10}\t{:^10}"
    # print(tplt.format('排名', '学校名称', '省份','总分',chr(12288)))
    # for i in range(num):
    #     u = ulist[i]
    #     print(tplt.format(u[0], u[1], u[2],u[3],chr(12288)))
    print("{:^10}\t{:^6}\t{:^10}\t{:^10}".format('排名', '学校名称', '地区', '总分'))
    for i in range(num):
         u = ulist[i]
         print("{:^10}\t{:^6}\t{:^10}\t{:^10}".format(u[0], u[1], u[2], u[3]))
    return
def main():
    """Crawl the 2019 best-universities ranking page and print the top 20."""
    unifo = []
    url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2019.html'
    html = getHtmlText(url)
    fillunivList(unifo, html)
    printUnivList(unifo, 20)  # print the top 20 universities


if __name__ == "__main__":  # guard: don't crawl when imported as a module
    main()

 

你可能感兴趣的:(爬虫之BeautifulSoup类)