Scrapy 从站点下载 json 文件?
Posted
技术标签:
【中文标题】Scrapy 从站点下载 json 文件?【英文标题】:Scrapy downloading json-files from site? 【发布时间】:2022-01-22 05:34:09 【问题描述】:我试图创建一个爬虫来从一个站点下载一些 json 文件 -
这是我的爬虫蜘蛛: (首先测试了蜘蛛 - 所以它只输出到 json 文件的链接,它工作正常 - 请参阅下面的注释代码) 但我想将 json 文件下载到我电脑上的文件夹中。
import scrapy


class DownfilesItem(scrapy.Item):
    """Item consumed by scrapy's built-in FilesPipeline.

    `file_urls` must hold a *list* of absolute URLs to fetch;
    `files` is filled in by the pipeline with the download results.
    """
    file_urls = scrapy.Field()
    # BUG FIX: was `scrapy.Field` (missing parentheses), which assigned the
    # Field *class* itself instead of a Field instance.
    files = scrapy.Field()


class spiderWords(scrapy.Spider):
    """Crawl kaikki.org's Spanish word index and yield JSON-file download items."""

    name = 'spiderWords'
    allowed_domains = ['kaikki.org']
    start_urls = ['https://kaikki.org/dictionary/Spanish/words.html']

    def parse(self, response):
        """First index level: follow each section link to its detail page."""
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parseDetails)

    def parseDetails(self, response):
        """Second index level: follow each link to the page holding the JSON link."""
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parseDownload)

    def parseDownload(self, response):
        """Extract the JSON download link and hand it to FilesPipeline."""
        link = response.xpath("//p[contains(text(), 'JSON')]/a/@href").get()
        item = DownfilesItem()
        # BUG FIX: FilesPipeline requires `file_urls` to be a list of URLs;
        # a bare string would be iterated character by character.
        item['file_urls'] = [response.urljoin(link)]
        yield item
这是我在 settings.py 中所做的更改:
# settings.py — enable scrapy's built-in file-download pipeline.
# (The `{}` braces of the dict literal were lost when the page was scraped.)
ITEM_PIPELINES = {
    'scrapy.pipelines.files.FilesPipeline': 1,
}
# BUG FIX: FilesPipeline reads FILES_STORE, not IMAGES_STORE
# (IMAGES_STORE belongs to ImagesPipeline) — this is why nothing downloaded.
FILES_STORE = r'C:\DOWNLOAD\DATASETS\Spanish'
但不幸的是,无法下载 json 文件。
如何将 json 文件下载到定义的文件夹中?
【问题讨论】:
【参考方案1】:你有两个问题：`item['file_urls']` 的值应该是一个列表（而不是单个字符串）；另外 FilesPipeline 使用的设置名是 `FILES_STORE`，而不是 `IMAGES_STORE`。
import scrapy


class spiderWords(scrapy.Spider):
    """Walk kaikki.org's two-level Spanish word index and queue every
    per-word JSON file for download via FilesPipeline."""

    name = 'spiderWords'
    allowed_domains = ['kaikki.org']
    start_urls = ['https://kaikki.org/dictionary/Spanish/words.html']

    def parse(self, response):
        # First index level: queue every section page.
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parseDetails)

    def parseDetails(self, response):
        # Second index level: queue every word page.
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parseDownload)

    def parseDownload(self, response):
        # Item type understood by scrapy's FilesPipeline.
        class DownfilesItem(scrapy.Item):
            file_urls = scrapy.Field()
            files = scrapy.Field()

        json_href = response.xpath("//p[contains(text(), 'JSON')]/a/@href").get()
        result = DownfilesItem()
        # FilesPipeline expects a list of absolute URLs here.
        result['file_urls'] = [response.urljoin(json_href)]
        yield result
编辑:
要设置文件名,请执行以下操作:
settings.py:
# settings.py — route items through the custom pipeline that renames files.
# (The `{}` braces of the dict literal were lost when the page was scraped.)
ITEM_PIPELINES = {
    'yourprojectname.pipelines.ProcessPipeline': 1,
}
# Directory where FilesPipeline (and subclasses) store downloaded files.
FILES_STORE = r'C:\DOWNLOAD\DATASETS\Spanish'
pipelines.py:
import os
from urllib.parse import unquote

from scrapy.pipelines.files import FilesPipeline


class ProcessPipeline(FilesPipeline):
    """FilesPipeline variant that stores each file under its original
    (percent-decoded) name instead of the default hash-based name."""

    def file_path(self, request, response=None, info=None, *, item=None):
        # Final path component of the request URL, e.g. '.../perro.json' -> 'perro.json'.
        return os.path.basename(unquote(request.url))
编辑 2:
将附加信息写入文件:
import json

import scrapy


class spiderWords(scrapy.Spider):
    """Spider that downloads the per-word JSON files and additionally
    records a breadcrumb-key -> page-URL map, written to erg.json when
    the crawl finishes."""

    name = 'spiderWords'
    allowed_domains = ['kaikki.org']
    start_urls = ['https://kaikki.org/dictionary/Spanish/words.html']
    # BUG FIX: the pasted code lost the `{}` dict literal here, leaving a
    # syntax error. NOTE: as a class attribute this dict is shared by all
    # instances — fine for a single spider run.
    erg = {}

    def parse(self, response):
        """First index level: queue every section page."""
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href), callback=self.parseDetails)

    def parseDetails(self, response):
        """Second index level: queue every word page."""
        for href in response.xpath("(//ul)[2]/li/a/@href").getall():
            yield scrapy.Request(response.urljoin(href), callback=self.parseDownload)

    def parseDownload(self, response):
        """Record the breadcrumb key for this page and yield the JSON link."""
        class DownfilesItem(scrapy.Item):
            file_urls = scrapy.Field()
            files = scrapy.Field()

        key = response.xpath('//ul[@class="breadcrumb"]/li[last()]/text()').get()
        self.erg[key] = response.url

        link = response.xpath("//p[contains(text(), 'JSON')]/a/@href").get()
        item = DownfilesItem()
        # FilesPipeline requires a list of absolute URLs.
        item['file_urls'] = [response.urljoin(link)]
        yield item

    def close(self, reason):
        """Dump the collected key -> URL mapping when the spider closes."""
        # NOTE(review): overriding Spider.close appears to work because scrapy
        # invokes it with the close reason — confirm against the scrapy
        # version in use (spiders more commonly define `closed(reason)`).
        with open('erg.json', 'w') as f:
            f.write(json.dumps(self.erg, indent=4))
【讨论】:
非常感谢您的帮助 - 现在效果很好!也许还有一个问题——有什么方法可以更改保存文件的名称吗? (它现在有一些生成的密钥,如 5a032ff2.json - 但我想有,可能在开头附加 - tmpDownloadLink 中的链接名称)这可能吗? 再次感谢您 - 这也很棒!也许最后一个问题 - 是否也有可能以某种方式向 json 文件产生额外的一些输出(例如,像我们现在所做的那样将文件产生到光盘 - 但额外创建一个 erg.json ,例如链接或一些其他数据? @Rapid1898 有不止一种方法可以做到这一点。有关示例,请参阅edit 2
。以上是关于Scrapy 从站点下载 json 文件?的主要内容,如果未能解决你的问题,请参考以下文章