urllib

urllib.request

The urllib.request module defines functions and classes for opening URLs.

urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False, context=None)

  • url: either a URL string or a Request object
  • data: the data to send to the URL (optional)
  • timeout: timeout for blocking operations, in seconds (see the timeout sketch below)
# -*- coding: UTF-8 -*-
import urllib.request   # import the module
response = urllib.request.urlopen('https://baidu.com')  # open the page
print(response.read())    # print the response body (bytes)
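The timeout argument is easy to overlook: if the server does not respond within the given number of seconds, an exception is raised. A minimal sketch (the 3-second value is arbitrary, and both possible timeout exceptions are caught):

# -*- coding: UTF-8 -*-
import socket
import urllib.request
import urllib.error

try:
    # wait at most 3 seconds for a response
    response = urllib.request.urlopen('https://baidu.com', timeout=3)
    print(response.getcode())   # HTTP status code, e.g. 200
    print(response.geturl())    # final URL after any redirects
except (urllib.error.URLError, socket.timeout):
    print('request failed or timed out')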

Constructing a Request instance to make the request

class urllib.request.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False, method=None)

# -*- coding: UTF-8 -*-
import urllib.request
request = urllib.request.Request('https://baidu.com')
response = urllib.request.urlopen(request)
print(response.read())
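As the signature above shows, Request also accepts headers and method arguments (method was added in Python 3.3), so a request can be customized before calling urlopen. A small sketch; the User-Agent string here is just an illustrative value:

# -*- coding: UTF-8 -*-
import urllib.request

headers = {'User-Agent': 'Mozilla/5.0'}   # illustrative header value
request = urllib.request.Request('https://baidu.com', headers=headers, method='GET')
response = urllib.request.urlopen(request)
print(response.getcode())      # HTTP status code
print(response.read()[:200])   # first 200 bytes of the page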

Passing data with GET
http://www.guancha.cn/Search/?k=一帶一路
This is the URL the browser shows when searching for 一帶一路 on guancha.cn (觀察者網(wǎng)).

# -*- coding: UTF-8 -*-
import urllib.request
import urllib.parse   # urlencode lives in urllib.parse and must be imported explicitly
values = {}
values['k'] = '一帶一路'
data = urllib.parse.urlencode(values)
print(data)   # k=%E4%B8%80%E5%B8%A6%E4%B8%80%E8%B7%AF
url = 'http://www.guancha.cn/Search/?' + data
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
con = open('g.html', 'wb')   # save the response to a local file
co = response.read()
con.write(co)
con.close()
###
The content of g.html is then:

<!DOCTYPE html>
<html lang="zh-cmn-Hans">
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
    <meta http-equiv="content-language" content="zh-CN">
    <title>搜索結(jié)果頁(yè)</title>
    <link rel="stylesheet" type="text/css" href="../css/public.css">
    <link rel="stylesheet" type="text/css" href="../css/main.css">
    <meta name="Description" content="觀察者網(wǎng),致力于薈萃中外思想者精華,鼓勵(lì)青年學(xué)人探索,建中西文化交流平臺(tái),為崛起中的精英提供決策參考。         " />
<meta name="Keywords" content="觀察者網(wǎng),觀察者,春秋綜合研究院,新聞,新媒體,觀察,中國(guó)模式,政治,軍事,歷史,評(píng)論" />
<title>觀察者網(wǎng)-中國(guó)關(guān)懷 全球視野</title>    <link rel="shortcut icon"  />
    <script type="text/javascript" src="../js/jquery-1.8.2.min.js"></script>
    <script type="text/javascript" src="../js/jquery.pagination.js"></script>
</head>
<body>
<div class="header">
......
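urlencode builds a whole query string from a dict; for a single value, urllib.parse.quote percent-encodes it directly and produces the same escaping as above. A small sketch:

# -*- coding: UTF-8 -*-
import urllib.parse

# quote() percent-encodes one value; urlencode() builds key=value pairs from a dict
print(urllib.parse.quote('一帶一路'))   # %E4%B8%80%E5%B8%A6%E4%B8%80%E8%B7%AF
url = 'http://www.guancha.cn/Search/?k=' + urllib.parse.quote('一帶一路')
print(url)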

POST request
Parameters are passed through the data argument described above (data must be a bytes object).

import urllib.request,urllib.parse
url = 'http://www.xxx.com'
postdata = urllib.parse.urlencode({
    'name':'diyinqianchang',
    'pass':'88888'
    }).encode('UTF-8')      # note: the encoded form data must be bytes
req = urllib.request.Request(url,postdata)
data = urllib.request.urlopen(req).read()
print(data)

Setting Headers

import urllib.request,urllib.parse
url = 'http://www.zhihu.com/#signin'
postdata = urllib.parse.urlencode({
    'username':'188****8091',
    'password':'88888888'
    }).encode('UTF-8')
req = urllib.request.Request(url,postdata)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36')  # set the User-Agent header
req.add_header('Referer','https://www.zhihu.com/')  # set the Referer (some sites check it to block hotlinking)
data = urllib.request.urlopen(req).read()
con = open('zhihu.html','wb')
con.write(data)
con.close()
print(data)

Another way to set headers

headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
data = urllib.parse.urlencode(values)
request = urllib.request.Request(url, data.encode('utf-8'), headers)
# pass the headers dict directly as the third argument of Request (url and values as in the example above)

Setting a proxy server

#coding:UTF-8
import urllib.request
"""
If proxies is given, it must be a dictionary mapping protocol names to URLs of proxies
"""
def use_proxy(proxy_addr,url):
    proxy = urllib.request.ProxyHandler({'http':proxy_addr})
    opener = urllib.request.build_opener(proxy)
    urllib.request.install_opener(opener)
    data = urllib.request.urlopen(url).read().decode('utf-8')
    return data
proxy_addr = '119.**.**.60:7777'
data = use_proxy(proxy_addr,'https://www.baidu.com')
print(len(data))
###
227
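One caveat: the ProxyHandler above only maps the 'http' scheme, so an https:// URL like the one passed in will bypass the proxy. A sketch that routes both schemes; the proxy address is a placeholder and must be replaced with a real one, and calling opener.open directly avoids installing a global opener with install_opener:

# -*- coding: UTF-8 -*-
import urllib.request

proxy_addr = '127.0.0.1:7777'   # placeholder; use a real proxy address
proxy = urllib.request.ProxyHandler({'http': proxy_addr, 'https': proxy_addr})
opener = urllib.request.build_opener(proxy)
data = opener.open('https://www.baidu.com').read().decode('utf-8')
print(len(data))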

URLError

#coding:UTF-8

"""
Two classes handle network errors: URLError and HTTPError. HTTPError is a subclass of URLError, so URLError covers more cases when catching exceptions.
URLError handles: failure to connect to the server, a nonexistent remote URL, no network connection, and HTTPError itself.
"""
import urllib.request
import urllib.error
try:
    urllib.request.urlopen('http://blog.baiduss.net')
except urllib.error.HTTPError as e:
    print(e.code)
    print(e.reason)
except urllib.error.URLError as e:
    print(e.reason)
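Because HTTPError is a subclass of URLError, another common pattern (a sketch equivalent to the code above) catches only URLError and checks which attributes are present:

# -*- coding: UTF-8 -*-
import urllib.request
import urllib.error

try:
    urllib.request.urlopen('http://blog.baiduss.net')
except urllib.error.URLError as e:
    if hasattr(e, 'code'):      # only HTTPError carries an HTTP status code
        print(e.code)
    if hasattr(e, 'reason'):    # both URLError and HTTPError have a reason
        print(e.reason)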