scrapy-Redis分布式爬虫案例----阳光问政平台爬虫

Posted eliwang

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了scrapy-Redis分布式爬虫案例----阳光问政平台爬虫相关的知识,希望对你有一定的参考价值。

我们将之前的阳光热线问政平台爬虫案例,改写成Scrapy-redis分布式爬虫

1.items.py

import scrapy

class MyprojectItem(scrapy.Item):
    """Item holding one complaint post scraped from the Sunshine hotline site."""
    title = scrapy.Field()    # post title
    number = scrapy.Field()   # post number
    status = scrapy.Field()   # processing status
    content = scrapy.Field()  # post body text

2.settings.py

BOT_NAME = 'myProject'

SPIDER_MODULES = ['myProject.spiders']
NEWSPIDER_MODULE = 'myProject.spiders'

ROBOTSTXT_OBEY = False
COOKIES_ENABLED = False

# Use the scrapy-redis scheduler so pending requests are shared through Redis.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# Use the scrapy-redis duplicate filter (request fingerprints stored in Redis).
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# By default RFPDupeFilter only logs the first duplicate request;
# setting DUPEFILTER_DEBUG to True logs every duplicate request.
DUPEFILTER_DEBUG = True

# Queue class used to order crawl requests:
# priority queue backed by a Redis sorted set (Scrapy's default ordering),
# neither FIFO nor LIFO.
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
# optional: first-in-first-out (FIFO)
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderQueue'
# optional: last-in-first-out (LIFO)
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderStack'

# Keep the scrapy-redis queues in Redis instead of clearing them,
# so a crawl can be paused and later resumed.
SCHEDULER_PERSIST = True

DOWNLOAD_DELAY = 1.0
# Randomize the download delay between requests.
RANDOMIZE_DOWNLOAD_DELAY = True

DOWNLOADER_MIDDLEWARES = {
    'myProject.middlewares.MyprojectDownloaderMiddleware': 543,
}

ITEM_PIPELINES = {
    # 'myProject.pipelines.MyprojectPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 500,
}

# Redis connection parameters (scrapy-redis defaults to localhost when omitted).
REDIS_HOST = 'xxx'
REDIS_PORT = 6379

# Pool of User-Agent strings; one is picked at random per request
# by the downloader middleware.
USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (Khtml, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
        "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
        "Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
        "Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    ]

3.middlewares.py

import random
import json
import redis

from myProject.settings import USER_AGENTS

class MyprojectDownloaderMiddleware:
    """Downloader middleware that sets a random User-Agent and an HTTP
    proxy (read from Redis) on every outgoing request."""

    # NOTE(review): spider_opened must be connected to the spider_opened
    # signal (normally via a from_crawler classmethod, not shown here) or
    # self.r is never created and process_request will fail -- confirm the
    # full middleware keeps that wiring.
    def spider_opened(self, spider):
        # Redis client used to read the proxy ip/port list that is
        # refreshed periodically by another process.
        self.r = redis.StrictRedis()
        spider.logger.info('Spider opened: %s' % spider.name)

    def process_request(self, request, spider):
        # Pull the current proxy list from Redis and pick one at random.
        proxy_list = json.loads(self.r.get('proxy_list'))
        proxy = random.choice(proxy_list)
        request.headers['User-Agent'] = random.choice(USER_AGENTS)  # random UA
        request.meta['proxy'] = 'http://' + proxy['ip'] + ':' + str(proxy['port'])  # proxy for this request
        # Returning None tells Scrapy to continue processing the request
        # through the remaining middlewares and the downloader.
        # (In the scraped original this statement was dedented to class
        # level, which is a SyntaxError.)
        return None

4.spiders/sun.py

from myProject.items import MyprojectItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from scrapy_redis.spiders import RedisCrawlSpider #导入Redis爬虫基类

class SunSpider(RedisCrawlSpider):
    """Distributed crawl spider for the Sunshine hotline (阳光问政) site.

    Inherits RedisCrawlSpider so start URLs are consumed from a shared
    Redis list instead of a hard-coded start_urls attribute.
    """
    name = 'sun'
    allowed_domains = ['wz.sun0769.com']
    # Slaves block until a start URL is pushed to this Redis key
    # (e.g. via LPUSH on the master).
    redis_key = 'sunspider:start_urls'

    rules = (
        # Follow pagination links (no callback: just crawl deeper).
        Rule(LinkExtractor(allow=r'id=\d+&page=\d+')),
        # Each post's detail page is parsed by parse_item.
        Rule(LinkExtractor(allow=r'politics/index\?id=\d+'), callback='parse_item'),
    )

    def parse_item(self, response):
        """Extract one complaint post from its detail page and yield it
        as a MyprojectItem."""
        item = MyprojectItem()
        # Post title.
        title = response.xpath('//div[@class="mr-three"]/p[@class="focus-details"]/text()').extract()[0]
        # Processing status: second whitespace-separated token of the span text.
        status = response.xpath('//div[@class="focus-date clear focus-date-list"]/span[3]/text()').extract()[0].split()[1]
        # Post number: text after the full-width colon, e.g. "编号：12345".
        # NOTE(review): the scraped original called split('') which always
        # raises ValueError; splitting on '：' matches the page label format,
        # but confirm against the live site.
        number = response.xpath('//div[@class="focus-date clear focus-date-list"]/span[4]/text()').extract()[0].split('：')[-1]
        # Post body text.
        content = response.xpath('//div[@class="details-box"]/pre/text()').extract()[0]
        item['title'] = title
        item['status'] = status
        item['number'] = number
        item['content'] = content
        yield item
        

5.运行

  • Slaver端运行

    scrapy runspider sun.py

    执行命令后,程序处于等待状态,直到Master端推入起始URL

  • Master端redis数据库执行以下命令:

    lpush sunspider:start_urls http://wz.sun0769.com/political/index/politicsNewest?id=1&page=1
  • slaver端爬虫程序开始爬取

以上是关于scrapy-Redis分布式爬虫案例----阳光问政平台爬虫的主要内容,如果未能解决你的问题,请参考以下文章

爬虫 - scrapy-redis分布式爬虫

Scrapy-Redis使用教程&&将现有爬虫修改为分布式爬虫

scrapy-redis分布式爬虫实战

Python爬虫scrapy-redis分布式实例

scrapy-redis分布式爬虫

10 基于scrapy-redis的分布式爬虫