Web Scraping with Scrapy, Case 2: The Sunshine Hotline Platform (阳光问政平台)

Posted by 骑着螞蟻流浪


The Sunshine Hotline Platform (阳光热线问政平台)

Start URL: http://wz.sun0769.com/index.php/question/questionType?type=4&page=

The page parameter is a record offset rather than a page number: the spider below advances it in steps of 30, one listing page's worth of posts at a time.

Fields to scrape: post ID, complaint type, post title, post URL, department, status, user, and post time.
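
Assuming the standard Scrapy workflow, a project skeleton matching the module paths used below (sunwzSpider.items, sunwzSpider.spiders) would be created with:

scrapy startproject sunwzSpider
cd sunwzSpider
scrapy genspider sunwz wz.sun0769.com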

1. items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy

class SunwzspiderItem(scrapy.Item):
    # define the fields for your item here like:
    # Fields scraped from each complaint post: ID, type, title, URL, department, status, user, time.
    # Post ID
    post_id = scrapy.Field()
    # Complaint type
    post_type = scrapy.Field()
    # Post title
    post_title = scrapy.Field()
    # Post URL
    post_url = scrapy.Field()
    # Department
    sector = scrapy.Field()
    # Status
    post_state = scrapy.Field()
    # User (netizen)
    net_friend = scrapy.Field()
    # Post time
    post_time = scrapy.Field()
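
Scrapy Items behave like dicts with a fixed, declared key set, which is what lets the spider below do item["field"] = value. A minimal interactive sketch (the ID value is made up for illustration):

>>> from sunwzSpider.items import SunwzspiderItem
>>> item = SunwzspiderItem()
>>> item["post_id"] = "12345"    # only declared Fields are accepted as keys
>>> dict(item)
{'post_id': '12345'}
>>> item["bogus"] = 1            # an undeclared key raises KeyError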

2. spiders/sunwz.py

# -*- coding: utf-8 -*-
import scrapy
from sunwzSpider.items import SunwzspiderItem

class SunwzSpider(scrapy.Spider):
    name = 'sunwz'
    allowed_domains = ['wz.sun0769.com']
    url = "http://wz.sun0769.com/index.php/question/questionType?type=4&page="
    offset = 0
    start_urls = [url + str(offset)]

    def parse(self, response):
        table = response.xpath("//table[@width='98%']")[0]
        trs = table.xpath("./tr")
        # Flag recording whether this page had any rows, i.e. whether to crawl the next page
        next_flag = False
        for tr in trs:
            next_flag = True
            try:
                item = SunwzspiderItem()
                # Post ID
                post_id = tr.xpath("./td/text()").extract()[0]
                td2 = tr.xpath("./td")[1]
                # Complaint type
                post_type = td2.xpath("./a/text()").extract()[0]
                # Post title
                post_title = td2.xpath("./a/text()").extract()[1]
                # Post URL
                post_url = td2.xpath("./a/@href").extract()[1]
                # Department
                sector = td2.xpath("./a/text()").extract()[2]
                td3 = tr.xpath("./td")[2]
                # Status
                post_state = td3.xpath("./span/text()").extract()[0]
                # User
                net_friend = tr.xpath("./td/text()").extract()[3]
                # Post time
                post_time = tr.xpath("./td/text()").extract()[4]

                item["post_id"] = post_id
                item["post_type"] = post_type
                item["post_title"] = post_title
                item["post_url"] = post_url
                item["sector"] = sector
                item["post_state"] = post_state
                item["net_friend"] = net_friend
                item["post_time"] = post_time

                yield item
            except IndexError:
                # Header and separator rows lack the expected cells; skip them
                pass

        # Decide whether to crawl the next page (30 posts per listing page)
        if next_flag:
            self.offset += 30
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
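
Indexing into extract() and swallowing errors with a bare except works but hides real failures. Since Scrapy 1.4 the selector API also offers get()/getall(), which return None or a plain list instead of raising, so the row handling can be written defensively. A sketch of an equivalent parse body using the same cell indices as above (pagination unchanged, omitted here):

    def parse(self, response):
        table = response.xpath("//table[@width='98%']")[0]
        for tr in table.xpath("./tr"):
            cells = tr.xpath("./td/text()").getall()       # [id, ..., user, time]
            links = tr.xpath("./td[2]/a/text()").getall()  # [type, title, sector]
            urls = tr.xpath("./td[2]/a/@href").getall()
            state = tr.xpath("./td[3]/span/text()").get()
            # Header and separator rows lack these cells; skip them explicitly
            if len(cells) < 5 or len(links) < 3 or len(urls) < 2 or state is None:
                continue
            item = SunwzspiderItem()
            item["post_id"] = cells[0]
            item["post_type"], item["post_title"], item["sector"] = links[:3]
            item["post_url"] = urls[1]
            item["post_state"] = state
            item["net_friend"], item["post_time"] = cells[3], cells[4]
            yield item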

3. pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import json

class SunwzspiderPipeline(object):
    def __init__(self):
        # Build the JSON array by hand so items can be written as they arrive
        self.file = open("阳光问政平台.json", "w", encoding="utf-8")
        self.first_flag = True

    def process_item(self, item, spider):
        # The first item opens the array; every later item is comma-separated
        if self.first_flag:
            self.first_flag = False
            content = "[\n" + json.dumps(dict(item), ensure_ascii=False)
        else:
            content = ",\n" + json.dumps(dict(item), ensure_ascii=False)
        self.file.write(content)

        return item

    def close_spider(self, spider):
        self.file.write("\n]")
        self.file.close()
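
This pipeline builds the JSON array by hand so that items stream to disk as they arrive. For comparison, Scrapy's built-in feed exports produce equivalent output with no custom pipeline at all; with FEED_EXPORT_ENCODING = "utf-8" added to settings.py to keep the Chinese text readable, a one-off export is just:

scrapy crawl sunwz -o sunwz.json

Note that -o appends to an existing file, so delete sunwz.json between runs.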

4. settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for sunwzSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'sunwzSpider'

SPIDER_MODULES = ['sunwzSpider.spiders']
NEWSPIDER_MODULE = 'sunwzSpider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'sunwzSpider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
   'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'
#   'Accept-Language': 'en',
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'sunwzSpider.middlewares.SunwzspiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'sunwzSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'sunwzSpider.pipelines.SunwzspiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
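
With the pipeline registered under ITEM_PIPELINES and DOWNLOAD_DELAY throttling requests to roughly one every two seconds, the crawl is started from the project root:

scrapy crawl sunwz

The scraped posts end up in 阳光问政平台.json in the directory the command is run from. One caveat: ROBOTSTXT_OBEY = True means any path disallowed by the site's robots.txt will be filtered out of the crawl (Scrapy logs such requests as forbidden).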

 
