[懶人福利]用Python進行[天堂電影]下載資源的搜索

#!/usr/bin/env python
#encoding:utf-8

import requests
from bs4 import BeautifulSoup
import urllib
import sys
import re

# Python 2-only workaround for implicit str<->unicode conversion errors:
# reload(sys) restores the hidden setdefaultencoding() (removed from the
# module namespace at startup) and forces UTF-8 as the implicit codec.
# NOTE(review): this does not exist in Python 3; the whole script is
# Python 2 (print statements, urllib.quote, input()).
reload(sys)  
sys.setdefaultencoding('utf8') 

def getHex(words):
    """Decode a percent-encoded (URL-encoded) string into raw characters.

    Each ``%XX`` escape becomes ``chr(0xXX)``.  Characters that are not
    part of an escape — text before the first ``%``, or trailing characters
    after the two hex digits of a segment — are kept literally.  The
    original implementation dropped any leading literal text and fed a
    whole segment (e.g. ``"42c"`` from ``"a%42c"``) to ``int(..., 16)``,
    mis-decoding mixed input; this behaves like ``urllib.unquote``.

    :param words: percent-encoded string, e.g. ``"%C1%A2"`` or ``"a%42c"``
    :return: decoded string
    """
    segments = words.split("%")
    # Text before the first '%' is not encoded; keep it verbatim.
    result = segments[0]
    for seg in segments[1:]:
        # Only the first two characters are the hex byte; the rest of the
        # segment is literal text that follows the escape.
        result += chr(int(seg[:2], 16)) + seg[2:]
    return result

'''
電影天堂模塊
'''

# config-start
maxPage = 5  # pages fetched automatically before asking the user to continue
modelName = "電影天堂"  # module display name, used in the output file name
url = "http://www.dy2018.com"  # site root; detail-page hrefs are relative to it
keyword = sys.argv[1]  # search keyword taken from the command line
pageSize = 20  # results per page as served by the site
# URL-encode the query: decode the console-encoded argv bytes, re-encode as
# GBK (the charset the site's search form expects), then percent-encode.
keywordURLencode = urllib.quote(keyword.decode(sys.stdin.encoding).encode('GBK')) # URL-encode the query keyword
searchUrl = "http://www.dy2018.com/e/search/index.php"
# Form fields for the site's search endpoint.  'keyboard' must carry the raw
# GBK bytes of the keyword, hence the percent-decoding via getHex().
postData = {
    'classid':'0',
    'show':'title,smalltext',
    'tempid':'1',
    'keyboard': getHex(keywordURLencode),
    # Raw GBK bytes of the submit-button label (presumably "立即搜索" —
    # TODO confirm against the live form).
    'Submit':chr(0xC1) + chr(0xA2) + chr(0xBC) + chr(0xB4) + chr(0xCB) + chr(0xD1) + chr(0xCB) + chr(0xF7)
}
# Browser-like headers so the site does not reject the scripted request.
headers = {
        'Host' : 'www.dy2018.com',
        'Cache-Control' : 'max-age=0',
        'Origin' : 'http://www.dy2018.com',
        'Upgrade-Insecure-Requests' : '1',
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
        'Content-Type' : 'application/x-www-form-urlencoded',
        'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Referer' : 'http://www.dy2018.com/index.html',
        'Accept-Encoding' : 'gzip, deflate',
        'Accept-Language' : 'zh-CN,zh;q=0.8,en;q=0.6',
        'Connection' : 'close'
}
# config-end

def getContent(url, timeout=30):
    """GET *url* with the browser-like module headers and return the text.

    :param url: absolute URL to fetch
    :param timeout: seconds before the request is aborted (new parameter,
        default 30) — without it a stalled connection hangs the script forever
    :return: response body decoded as GBK
    """
    response = requests.get(url, headers=headers, timeout=timeout)
    # Force the response charset.  The site declares gb2312, but GBK is a
    # strict superset of it, so decoding as GBK avoids replacement chars
    # for the rare characters gb2312 cannot represent.
    response.encoding = 'GBK'
    return response.text

def getResultNumber(soup):
    """Return the total number of search hits shown on the result page.

    The counter lives in a ``<b>`` element inside the anchor whose
    ``title`` attribute is "總數(shù)".
    """
    counter_anchor = soup.find("a", title="總數(shù)")
    return int(counter_anchor.find("b").string)

def getSoupByPostData(url, postData):
    """POST the search form and return the parsed result page.

    :param url: search endpoint URL
    :param postData: form fields (see the module-level ``postData``)
    :return: BeautifulSoup over the decoded response body
    """
    response = requests.post(url, data=postData, headers=headers)
    # Set the charset explicitly.  The original did ``.text.decode("UTF-8")``,
    # which re-decoded an already-decoded unicode string and only "worked"
    # because of the setdefaultencoding('utf8') hack at the top of the file.
    response.encoding = 'GBK'
    return BeautifulSoup(response.text, "html.parser")

def getPageNumber(resultNumber, pageSize=20):
    """Return how many result pages are needed for *resultNumber* hits.

    Fixes an off-by-one in the original ``int(resultNumber / pageSize) + 1``,
    which over-counted by a whole page whenever the result count was an
    exact multiple of the page size (40 results / 20 per page gave 3).

    :param resultNumber: total number of search results
    :param pageSize: results per page (new parameter; default matches the
        module-level ``pageSize`` of 20, so existing calls are unchanged)
    :return: page count, at least 1 so the first page is always processed
    """
    # -(-a // b) is ceiling division without importing math.
    return max(1, -(-resultNumber // pageSize))

def getPageID(soup):
    """Extract the search-session id from the result page's pager.

    The pager's second anchor links to
    ``.../searchid-<ID>-page-<N>.html``; the id is the token between the
    first and second '-' of that href.
    """
    pager = soup.find('div', class_="x", style="text-align: center;font-size: 14px;margin: 5px 0;")
    second_link = pager.findAll("a")[1]
    href = second_link['href']
    return href.split("-")[1]

def getResultDic(soup):
    """Scrape one search-result page into a list of result dicts.

    For every result table on the page, collect the title, description,
    post time and click count, then fetch the item's detail page to gather
    its download links — so this performs one extra HTTP request per result.

    :param soup: BeautifulSoup over one search-result page
    :return: list of dicts with keys title/describe/time/click/downloadLink
    """
    results = []
    # Result rows are <table> elements with this exact attribute signature.
    tables = soup.findAll("table", width="100%", border="0", cellspacing="0", cellpadding="0", class_="tbspan", style="margin-top:6px")
    for table in tables:
        # Result title (anchor's title attribute)
        title = str(table.find("a")["title"])
        # Result description
        describe = table.find("td", colspan="2", style="padding-left:3px").string
        # Absolute URL of the detail page (href is site-relative)
        src = url + table.find("a")['href']
        # Post time and click count share one <font> element, one per line
        temp = table.find("font", color="#8F8C89").string
        time = temp.split("\n")[0].split(":")[1][0:-1] # note: splits on the full-width (Chinese) colon in the page text
        click = temp.split("\n")[1].split(":")[1] # note: full-width colon here too
        # Download links: fetch the detail page and take the first anchor of
        # every <tbody> (presumably one tbody per download table — verify)
        downloadLinks = []
        newContent = getContent(src)
        newSoup = BeautifulSoup(newContent, "html.parser")
        tbodys = newSoup.findAll("tbody")
        for tbody in tbodys:
            downloadLinks.append(tbody.find("a")['href'])
        result = {
            "title":title,
            "describe":describe,
            'time':time,
            'click':click,
            "downloadLink":downloadLinks
        }
        results.append(result)
        print "單條數(shù)據(jù)獲取成功 !"
    return results

# --- main script ---
# Run the search POST once; the response is both the first result page and
# the source of the paging metadata (total hits, page count, search id).
soup = getSoupByPostData(searchUrl, postData)

resultNumber = getResultNumber(soup)
pageNumber = getPageNumber(resultNumber)
pageID = getPageID(soup)

print "查詢結(jié)果數(shù) :", resultNumber
print "總頁面數(shù)量 :", pageNumber

# Scrape the first page directly from the POST response.
print "正在獲取第 1 頁的結(jié)果" 
results = getResultDic(soup)
print "該頁所有結(jié)果獲取成功 !"

if pageNumber > maxPage:
    for page in range(maxPage):
        print "正在獲取第",(page + 1),"頁的結(jié)果"
        thisUrl = "http://www.dy2018.com/e/search/result/searchid-" + pageID + "-page-" + str(page) + ".html"
        tempContent = getContent(thisUrl)
        tempSoup = BeautifulSoup(tempContent, "html.parser")
        results += getResultDic(soup)
    SIGN = input("已經(jīng)獲取了" + maxPage + "個頁面 , 是否需要繼續(xù)獲取 ? [1/0]")
    if SIGN == 1:
        for page in range(maxPage, pageNumber):
            print "正在獲取第",(page + 1),"頁的結(jié)果"
            thisUrl = "http://www.dy2018.com/e/search/result/searchid-" + pageID + "-page-" + str(page) + ".html"
            tempContent = getContent(thisUrl)
            tempSoup = BeautifulSoup(tempContent, "html.parser")
            results += getResultDic(soup)
else:
    for page in range(pageNumber):
        print "正在獲取第",(page + 1),"頁的結(jié)果"
        thisUrl = "http://www.dy2018.com/e/search/result/searchid-" + pageID + "-page-" + str(page) + ".html"
        tempContent = getContent(thisUrl)
        tempSoup = BeautifulSoup(tempContent, "html.parser")
        results += getResultDic(soup)
print "數(shù)據(jù)獲取完畢 ! "

# Write all results to "<modelName>-<keyword>.txt", one record per result.
# FIX: the original re-opened and re-closed the file once per result and
# shadowed the ``file`` builtin; open it once and let ``with`` close it
# even if a write raises.
with open(modelName + "-" + keyword + ".txt", "a+") as outFile:
    for result in results:
        outFile.write("---------------------------\n")
        outFile.write("標題 : " + result['title'] + "\n")
        outFile.write("描述 : \n\t" + result['describe'] + "\n")
        outFile.write("時間 : " + result['time'] + "\n")
        outFile.write("點擊量 : " + result['click'] + "\n")
        outFile.write("下載地址 : " + "\n")
        for downloadlink in result['downloadLink']:
            outFile.write("\t" + downloadlink + "\n")
        outFile.write("\n")

最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
平臺聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點,簡書系信息發(fā)布平臺,僅提供信息存儲服務(wù)。

推薦閱讀更多精彩內(nèi)容

  • 病痛也應(yīng)是對人的磨練,有的人拼命堅持,有的人大呼小叫,但結(jié)果都是一樣的恢復(fù)如初。也許這是人生中的一個小挫折,或是一...
    preacher6閱讀 114評論 0 0
  • 風(fēng)吹殘花落日圓, 倦鳥不知是秋天; 秋雨打破荷花葉, 微皺池塘起波瀾。 漫山遍野泛金黃, 顆粒歸倉堆成山; 小燕南...
    琢玉書生閱讀 347評論 6 5
  • 想想有時我們太年輕,不僅僅是生理上,更是心理上的稚嫩。這樣的年少輕狂,和還未獨立易受影響的心智,讓每一個我們都經(jīng)歷...
    JoJo之迷閱讀 187評論 0 0