【python2.7】爬取知網論文

# -*- coding: utf-8 -*-
import time
import urllib
import urllib2
import cookielib
from lxml import etree
import random


'''
爬取第一頁,獲取共頁數
爬取第二頁至最后一頁
'''

# 下載當前頁所有文章的pdf或caj
def download_paper(treedata, opener, localdir):
    '''
    傳入參數:
        treedata:當前列表頁的treedata數據
        opener: referer已修改為當前頁
        localdir: 保存目錄
    '''
    tr_node = treedata.xpath("http://tr[@bgcolor='#f6f7fb']|//tr[@bgcolor='#ffffff']")

    for item in tr_node:
        paper_title = item.xpath("string(td/a[@class='fz14'])")
        paper_link = item.xpath("td/a[@class='fz14']/@href")
        paper_author = item.xpath("td[@class='author_flag']/a/text()")
        paper_source = item.xpath("td[4]/a/text()")
        paper_pub_date = item.xpath("td[5]/text()")
        paper_db = item.xpath("td[6]/text()")
        paper_cited = item.xpath("td[7]//a/text()")
        paper_download_count = item.xpath("td[8]/span/a/text()")
        print paper_title
        print paper_link

        # 獲取paper詳情頁面鏈接,訪問詳情頁前,要設置referer
        paper_detail_url_fake = "http://kns.cnki.net" + paper_link[0]
        response = opener.open(paper_detail_url_fake)
        paper_detail_page_treedata = etree.HTML(response.read())
        # 下載前要設置referer為詳情頁
        opener.addheaders = [("Referer", response.url)]

        # 碩士論文并沒有【pdf下載】的鏈接
        pdf_download_url = paper_detail_page_treedata.xpath('//*[@id="pdfDown"]/@href')
        if len(pdf_download_url) == 0:
            whole_book_download_url = paper_detail_page_treedata.xpath('//*[@id="DownLoadParts"]/a[1]/@href')
            download_url = whole_book_download_url[0]
            filename = localdir + paper_title + ".caj"
        else:
            download_url = pdf_download_url[0]
            filename = localdir + paper_title + ".pdf"
        filename.replace("\\", "").replace("/","").replace(":", "").replace("*", "").replace("?", "").replace("\"","").replace("<","").replace(">","").replace("|","")
        response_file = opener.open(download_url)
        down_file = open(filename, 'wb')
        down_file.write(response_file.read())
        down_file.close()


# --- Step 1: register the search query with the server ---------------------
url = 'http://kns.cnki.net/kns/request/SearchHandler.ashx?action=&NaviCode=*&'

# Timestamp in the exact format CNKI's JavaScript client sends.
times = time.strftime('%a %b %d %Y %H:%M:%S') + ' GMT+0800 (中國標準時間)'

# NOTE: the original assigned 'db_opt' and '__' twice each; only the final
# values were ever sent, so the dead first assignments are dropped here.
parameter = {
    'ua': '1.11',
    'formDefaultResult': '',
    'PageName': 'ASP.brief_default_result_aspx',
    'DbPrefix': 'SCDB',
    'DbCatalog': '中國學術文獻網絡出版總庫',
    'ConfigFile': 'SCDBINDEX.xml',
    'db_opt': 'CJFQ,CJRF,CDFD,CMFD,CPFD,IPFD,CCND,CCJD',
    'txt_1_sel': 'SU$%=|',
    'txt_1_value1': '爬蟲',
    'txt_1_special1': '%',
    'his': '0',
    'parentdb': 'SCDB',
    '__': times,
}

getdata = urllib.urlencode(parameter)

# Candidate User-Agent strings for rotation when the site starts serving
# captchas (see the commented random.choice line below).
uapools = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
]

headers = {'Connection': 'Keep-Alive','Accept': 'text/html,*/*','User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.66 Safari/537.36'}
headers['Referer'] = 'http://kns.cnki.net/kns/brief/default_result.aspx'
#headers['User-Agent'] = random.choice(uapools)
req = urllib2.Request(url + getdata, headers=headers)

# Cookie jar shared by every subsequent request (session state lives here).
cookie = cookielib.CookieJar()

opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie), urllib2.HTTPHandler)
html = opener.open(req).read()

# Keep a copy of the raw response for debugging.
with open('C:/code/test3/web1.html', 'w') as e:
    e.write(html)

# --- Step 2: fetch page 1 of the search-result list ------------------------
second_params = {
    'pagename': 'ASP.brief_default_result_aspx',
    'dbPrefix': 'SCDB',
    'dbCatalog': '中國學術文獻網絡出版總庫',
    'ConfigFile': 'SCDBINDEX.xml',
    'research': 'off',
    't': int(time.time()),
    'keyValue': '爬蟲',
    'S': '1',
}

url2 = 'http://kns.cnki.net/kns/brief/brief.aspx'
req2 = urllib2.Request(url2 + '?' + urllib.urlencode(second_params), headers=headers)
# The response body is the first page of the result list.
result2 = opener.open(req2)
html2 = result2.read()

# Keep a copy of the raw response for debugging.
with open('C:/code/test3/web2.html', 'w') as e:
    e.write(html2)

treedata = etree.HTML(html2)

# Point the Referer at the list page before any detail page is visited.
opener.addheaders = [("Referer", req2.get_full_url())]
localdir = "C:/code/test3/pdf/"
download_paper(treedata, opener, localdir)

#獲取總頁數total_page_count
current_page_node = treedata.xpath('//span[@class="countPageMark"]/text()')
print "current_page_node:", current_page_node
total_page_count = current_page_node[0].split('/')[1]
print "total_page_count:", total_page_count

current_url = result2.url
for page_num in range(2, int(total_page_count)+1):
    #獲取下一頁的鏈接
    print "準備爬取第", str(page_num), "頁"
    next_page_node = treedata.xpath('//div[@class="TitleLeftCell"]/a[last()]/@href')
    next_page_url = next_page_node[0]
    next_page_url_full = url2 + next_page_url
    opener.addheaders = [("Referer", current_url)]
    # 返回的是搜索結果下一頁的列表頁
    next_page_response = opener.open(next_page_url_full)
    opener.addheaders = [("Referer", next_page_response.url)]
    #file_next_page = open('C:/code/test3/web4' + str(page_num) + '.html', 'w')
    html = next_page_response.read()
    #file_next_page.write(html)
    #file_next_page.close()
    
    
    #print "current_url:", current_url
    #print "next_page_url:", next_page_response.url
    # 修改上一頁,以供請求下頁時引用
    #result2 = next_page_response
    treedata = etree.HTML(html)
    current_url = next_page_response.url

    localdir = "C:/code/test3/pdf/"
    download_paper(treedata, opener, localdir)

最新代碼
https://github.com/tom523/crawlCnki.git
爬蟲故障

  • 服務器響應超時
  • 驗證碼輸入
image.png

不只一次的在第17頁,需要輸入驗證碼


image.png

處理方法:重新更換User-Agent后,直接從第17頁開始爬取

20171110日志
image.png

20171109晚上爬取結果,搜索關鍵字“爬蟲”,看起來像是給了假數據,可能知網檢測出來了爬蟲。

最后編輯于
?著作權歸作者所有,轉載或內容合作請聯系作者
平臺聲明:文章內容(如有圖片或視頻亦包括在內)由作者上傳并發布,文章內容僅代表作者本人觀點,簡書系信息發布平臺,僅提供信息存儲服務。

推薦閱讀更多精彩內容

  • Android 自定義View的各種姿勢1 Activity的顯示之ViewRootImpl詳解 Activity...
    passiontim閱讀 172,813評論 25 708
  • 簡介: 為了你我可以放棄我的全部,只是,你卻不給我放棄的機會,我踏過千山萬水只為換你回眸一笑,然而,最終你還...
    穿越深海來愛你閱讀 530評論 0 1
  • 至今我依然無法融入到人群之中,不論我如何的努力。 在人群之中時,我會帶著一個面具。但是無人能夠看出面具之下的我是多...
    蜉蝣的蜉蝣閱讀 160評論 0 0
  • 傍晚在街邊的公交站臺那個日日都停留的地方日日和不同的人接踵把萬事都磨碎了裝在垃圾桶里把煩惱都逼瘋了附在擁擠的車廂里...
    流虻閱讀 274評論 0 0
  • 01 昨天,大學閨蜜找我聊天,無意中和我提到她的一個好閨蜜最近分手了,分手的原因是男方出軌。 事情經過是這樣的:女...
    蘇草Chen閱讀 464評論 0 2