You need Redis, MongoDB, and scrapy-redis installed first; search for installation instructions yourself if needed.
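Before starting, it may help to confirm both services are reachable. A minimal sketch, assuming Redis and MongoDB run locally on their default ports and that the redis-py and pymongo packages are installed:

import pymongo
import redis

# ping Redis; returns True if the server is reachable
print(redis.StrictRedis(host='127.0.0.1', port=6379).ping())
# ask MongoDB for its server version to confirm the connection works
print(pymongo.MongoClient('127.0.0.1', 27017).server_info()['version'])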
1. Analyze the pages to be crawled... I didn't use a proxy while crawling, so my IP has already been blocked; the page analysis will have to wait until tomorrow. Today, just the code.
Code walkthrough. First, the spider without Redis:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from booszp.items import BooszpItem


class BoosSpider(scrapy.Spider):
    name = "boos"
    allowed_domains = ["www.zhipin.com"]
    start_urls = ['http://www.zhipin.com/job_detail/?query=php&scity=100010000&source=1']

    def parse(self, response):
        # each job posting sits in its own div.job-primary block
        lianjie = response.xpath('//div[@class="job-primary"]')
        for dizhi in lianjie:
            item = BooszpItem()
            item['name'] = dizhi.xpath('./div[@class="info-primary"]/h3[@class="name"]/text()').extract()
            item['xinzi'] = dizhi.xpath('./div[@class="info-primary"]/h3[@class="name"]/span[@class="red"]/text()').extract()
            item['dizhi'] = dizhi.xpath('./div[@class="info-primary"]/p/text()').extract()
            item['gongsi'] = dizhi.xpath('./div[@class="info-company"]/div[@class="company-text"]/h3[@class="name"]/text()').extract()
            yield item
        # follow the "next page" link until it turns into javascript:;
        jj = response.xpath('//div[@class="page"]/a/@href').extract()[-1]
        if jj != 'javascript:;':
            ff = 'http://www.zhipin.com/' + jj
            yield Request(url=ff, callback=self.parse, dont_filter=True)
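To run this stand-alone version, `scrapy crawl boos` from the project directory works; the same thing can be done from a small script (a sketch, assuming it sits in the project root next to scrapy.cfg):

# run the spider programmatically, equivalent to `scrapy crawl boos`
from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'boos'])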
And here is the same spider converted to scrapy-redis; instead of a hard-coded start_urls list it pops its start URLs from a Redis key:

# -*- coding: utf-8 -*-
from scrapy.http import Request
from booszp.items import BooszpItem
from scrapy_redis.spiders import RedisSpider


class BoosSpider(RedisSpider):
    name = "boos"
    # the spider reads its start URLs from this Redis list
    redis_key = 'boos:start_urls'
    allowed_domains = ["www.zhipin.com"]

    def parse(self, response):
        lianjie = response.xpath('//div[@class="job-primary"]')
        for dizhi in lianjie:
            item = BooszpItem()
            item['name'] = dizhi.xpath('./div[@class="info-primary"]/h3[@class="name"]/text()').extract()
            item['xinzi'] = dizhi.xpath('./div[@class="info-primary"]/h3[@class="name"]/span[@class="red"]/text()').extract()
            item['dizhi'] = dizhi.xpath('./div[@class="info-primary"]/p/text()').extract()
            item['gongsi'] = dizhi.xpath('./div[@class="info-company"]/div[@class="company-text"]/h3[@class="name"]/text()').extract()
            yield item
        jj = response.xpath('//div[@class="page"]/a/@href').extract()[-1]
        if jj != 'javascript:;':
            ff = 'http://www.zhipin.com/' + jj
            yield Request(url=ff, callback=self.parse, dont_filter=True)
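Once this version is running it sits idle until a URL appears in Redis, and scrapy-redis keeps its scheduler state in Redis as well. A sketch for peeking at that state, assuming the default scrapy-redis key names and the list-based SpiderQueue configured in settings.py below:

import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379)
print(r.llen('boos:start_urls'))   # start URLs not yet consumed by the spider
print(r.llen('boos:requests'))     # requests waiting in the shared scheduler queue
print(r.scard('boos:dupefilter'))  # fingerprints of requests already seen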
items.py:

import scrapy


class BooszpItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()      # job title
    xinzi = scrapy.Field()     # salary
    dizhi = scrapy.Field()     # location
    jingyan = scrapy.Field()   # experience
    dengji = scrapy.Field()    # level
    gongsi = scrapy.Field()    # company
middlewares.py:

import random

from scrapy import signals
from scrapy.conf import settings


class UAMiddleware(object):
    # rotate the User-Agent: pick a random UA from settings for every request
    user_agent_list = settings['USER_AGENT_LIST']

    def process_request(self, request, spider):
        ua = random.choice(self.user_agent_list)
        request.headers['User-Agent'] = ua


class ProxyMiddleware(object):
    # rotate proxies: pick a random proxy IP from settings for every request
    ip_list = settings['IP_LIST']

    def process_request(self, request, spider):
        ip = random.choice(self.ip_list)
        print(ip)
        request.meta['proxy'] = ip
settings.py:
DOWNLOADER_MIDDLEWARES = {
    'booszp.middlewares.UAMiddleware': 543,     # random User-Agent middleware
    'booszp.middlewares.ProxyMiddleware': 544,  # proxy rotation middleware
}

ITEM_PIPELINES = {
    'booszp.pipelines.BooszpPipeline': 300,
    # 'scrapy_redis.pipelines.RedisPipeline': 301,
}

# Replace Scrapy's scheduler with the scrapy-redis one
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Redis-based request de-duplication
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER_PERSIST = False
# If True, scrapy_redis does not clear the URLs stored in Redis. The benefit is
# that a stopped spider resumes from where it left off when restarted; the
# drawback is that if several spiders read URLs from the same queue, extra code
# is needed to avoid crawling the same URL twice.
# If False, scrapy_redis deletes each URL as soon as it is read. The benefit is
# that spiders on different servers never receive the same URL, so nothing is
# crawled twice; the drawback is that a restarted spider starts over from scratch.
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderQueue'  # request scheduling order (FIFO queue)

USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36',
    'Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11',
]

IP_LIST = [
    'http://122.226.62.90:3128',  # proxy IPs
    'http://101.200.40.47:3128',
    'http://84.52.115.139:8080',
]

REDIS_HOST = '127.0.0.1'  # change to your Redis server's actual IP
REDIS_PORT = 6379         # change to your Redis server's actual port

MONGODB_HOST = '127.0.0.1'  # MongoDB connection settings
MONGODB_PORT = 27017
MONGODB_DBNAME = 'boos'
MONGODB_DOCNAME = 'boos3'
pipelines.py:

import pymongo
from scrapy.conf import settings


class BooszpPipeline(object):
    def __init__(self):
        # connect to the MongoDB instance configured in settings.py
        client = pymongo.MongoClient(settings['MONGODB_HOST'], settings['MONGODB_PORT'])
        db = client[settings['MONGODB_DBNAME']]
        self.post = db[settings['MONGODB_DOCNAME']]

    def process_item(self, item, spider):
        # store each scraped job posting as one MongoDB document
        book_info = dict(item)
        self.post.insert(book_info)
        return item
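To check that the pipeline is actually writing, a quick query against MongoDB helps (a sketch, assuming the database and collection names from settings.py above):

import pymongo

collection = pymongo.MongoClient('127.0.0.1', 27017)['boos']['boos3']
print(collection.count())     # number of stored job postings
print(collection.find_one())  # one sample document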
Then start Redis, push the start URL into it, and launch the spider.
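Feeding the URL can be done from redis-cli with LPUSH, or from Python with redis-py; a minimal sketch assuming a local Redis:

import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379)
# push the first URL onto the list that the spider's redis_key points at
r.lpush('boos:start_urls',
        'http://www.zhipin.com/job_detail/?query=php&scity=100010000&source=1')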
Next, deploy the project with scrapyd. Start scrapyd, then open a new cmd window and run:

curl http://localhost:6800/listprojects.json                                # list the deployed projects
curl http://localhost:6800/schedule.json -d project=booszp -d spider=boos   # start the spider

You can also open localhost:6800 in a browser to watch the running jobs.
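The same scrapyd API calls can be made from Python with the requests library (a sketch, assuming scrapyd runs on its default port 6800):

import requests

# list the projects deployed to scrapyd
print(requests.get('http://localhost:6800/listprojects.json').json())
# schedule the boos spider of the booszp project
print(requests.post('http://localhost:6800/schedule.json',
                    data={'project': 'booszp', 'spider': 'boos'}).json())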
Finally, push a start URL into Redis (as shown earlier) and the crawl begins.
Leave a comment if you have any questions.