import json
import threading
import time

from Queue import Empty
from Queue import Queue

import requests
from lxml import etree
class ThreadCrawl(threading.Thread):
def __init__(self, threadname, pageQueue, dataQueue):
super(ThreadCrawl, self).__init__()
self.threadname = threadname
self.pageQueue = pageQueue
self.dataQueue = dataQueue
self.headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
def run(self):
print "啟動"+self.threadname
while not CRAWL_EXIT:
try:
page = self.pageQueue.get(False)
url = "https://www.qiushibaike.com/8hr/page/"+str(page)+"/"
content = requests.get(url, headers=self.headers).text
time.sleep(1)
self.dataQueue.put(content)
except:
pass
print "結束"+self.threadname
class ThreadParse(threading.Thread):
    """Worker thread that parses crawled HTML and appends JSON lines to a file.

    Pulls raw HTML documents from ``dataQueue``, extracts one record per post,
    and writes each record as a UTF-8 JSON line to the shared output file,
    serialized by ``lock``.  Loops until the module-level ``PARSE_EXIT`` flag
    is set by ``main()``.
    """

    def __init__(self, threadname, dataQueue, filename, lock):
        super(ThreadParse, self).__init__()
        self.threadname = threadname  # label for this worker
        self.dataQueue = dataQueue    # queue of HTML documents to parse
        self.filename = filename      # shared open file object for output
        self.lock = lock              # guards writes to the shared file

    def run(self):
        while not PARSE_EXIT:
            try:
                # Non-blocking get so the exit flag is re-checked promptly.
                html = self.dataQueue.get(False)
            except Empty:
                continue
            self.parse(html)

    def parse(self, html):
        """Extract every post from one HTML page and write it as a JSON line."""
        root = etree.HTML(html)
        for node in root.xpath('//div[contains(@id,"qiushi_tag")]'):
            try:
                items = {
                    "username": node.xpath('.//img/@alt')[0],
                    "image": node.xpath('./div/a/img/@src'),
                    "content": node.xpath('.//div[@class="content"]/span')[0].text.strip(),
                    "zan": node.xpath('.//i')[0].text,
                    "comment": node.xpath('.//i')[1].text,
                }
            except (IndexError, AttributeError):
                # Skip posts whose markup does not match the expected layout
                # (missing element -> IndexError, empty <span> text -> AttributeError).
                continue
            with self.lock:
                # BUG FIX: the original called encode('utf=8') — an invalid
                # codec name that raised LookupError on every write (and was
                # then silently swallowed by run()'s bare except).
                self.filename.write(json.dumps(items, ensure_ascii=False).encode('utf-8') + "\n")
# Shared shutdown flags, polled by the worker threads and set in main()
# once the corresponding queue has been drained.
CRAWL_EXIT = False
PARSE_EXIT = False
def main():
pageQueue = Queue(20)
for i in range(1, 21):
pageQueue.put(i)
dataQueue = Queue()
filename = open("duanzi.json", "a")
lock = threading.Lock()
crawlList = ["采集線程1號", "采集線程2號", "采集線程3號"]
threadcrawl = []
for threadname in crawlList:
thread = ThreadCrawl(threadname, pageQueue, dataQueue)
thread.start()
threadcrawl.append(thread)
parseList = ["解析線程1號", "解析線程2號", "解析線程3號"]
threadparse = []
for threadname in parseList:
thread = ThreadParse(threadname, dataQueue, filename, lock)
thread.start()
threadparse.append(thread)
while not pageQueue.empty():
pass
global CRAWL_EXIT
CRAWL_EXIT = True
for thread in threadcrawl:
thread.join()
print "1"
while not dataQueue.empty():
pass
global PARSE_EXIT
PARSE_EXIT = True
for thread in threadparse:
thread.join()
print "2"
with lock:
filename.close()
print "謝謝使用"
# Script entry point.
if __name__ == "__main__":
    main()
# 多線程爬糗事百科
# © 著作權歸作者所有，轉載或內容合作請聯系作者
# 平臺聲明：文章內容（如有圖片或視頻亦包括在內）由作者上傳并發布，文章內容僅代表作者本人觀點，簡書系信息發布平臺，僅提供信息存儲服務。
# 推薦閱讀更多精彩內容
# - 多線程糗事百科案例 案例要求參考上一個糗事百科單進程案例 Queue(隊列對象) Queue是python中的標準...