Scrapy Learning, Lesson 8
Posted by helenandyoyo
Lesson 8 of learning the Python crawler framework Scrapy.
Goal: crawl articles and store both the text and the image data:
text data saved to a json file
text data stored in a MongoDB database
images saved locally
Crawl target: 伯乐在线 (blog.jobbole.com) articles
Crawler example
items.py — defines the fields of a scraped article:

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy


class JobboleItem(scrapy.Item):
    # title
    title = scrapy.Field()
    # publish date
    create_date = scrapy.Field()
    # article link
    url = scrapy.Field()
    # MD5 hash of the url (fixed-length id)
    url_object_id = scrapy.Field()
    # cover image url
    front_image_url = scrapy.Field()
    # local path the image is stored at
    front_image_path = scrapy.Field()
    # upvote count
    praise_nums = scrapy.Field()
    # bookmark count
    fav_nums = scrapy.Field()
    # comment count
    comment_nums = scrapy.Field()
    # tags
    tag = scrapy.Field()
    # article body (not stored for now)
    #content = scrapy.Field()
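A JobboleItem behaves like a dict, which is what the pipelines further down rely on when they call dict(item). A minimal sketch (field values made up):

item = JobboleItem()
item['title'] = 'some title'
item['praise_nums'] = 3
print(dict(item))  # {'title': 'some title', 'praise_nums': 3}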
bole.py — the spider itself:

# -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urljoin
from jobBole.items import JobboleItem
import re
import hashlib
import datetime

def get_md5(md5str):
    # create an MD5 object
    m1 = hashlib.md5()
    # feed the utf-8 encoded string into the object's update method
    m1.update(md5str.encode("utf-8"))
    md5ConvertStr = m1.hexdigest()
    return md5ConvertStr
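
# Quick sanity check of get_md5 (the url below is just an example):
#   get_md5("http://blog.jobbole.com/114000/")
# always yields a 32-character hex string, so every url gets a fixed-length id.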

class BoleSpider(scrapy.Spider):
    name = 'bole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']
    def parse(self, response):
        '''
        1. Extract the url of every article on the listing page and hand it to scrapy to download and parse.
        2. Extract the url of the next page and hand it to scrapy; once downloaded it is fed back into parse.
        :param response:
        :return:
        '''
        # parse the urls of all articles on the listing page and hand them to scrapy
        post_nodes = response.css("#archive .floated-thumb .post-thumb a")
        for post_node in post_nodes:
            # image_url is the address of the cover image
            image_url = post_node.css("img::attr(src)").extract_first("")
            post_url = post_node.css("::attr(href)").extract_first("")
            # pass the image url along through the meta parameter; urljoin ignores
            # response.url when post_url already carries a domain, and joins the two otherwise
            yield scrapy.Request(url=urljoin(response.url, post_url),
                                 meta={"front_image_url": urljoin(response.url, image_url)},
                                 callback=self.parse_detail)
        # extract the next page and hand it to scrapy to download
        next_url = response.css(".next.page-numbers::attr(href)").extract_first("")
        curr_page = int(response.xpath('//span[@class="page-numbers current"]/text()').extract()[0])
        # stop after page 3 to keep the demo crawl small
        if next_url and curr_page < 3:
            yield scrapy.Request(url=next_url, callback=self.parse)
    def parse_detail(self, response):
        '''
        Extract the detailed content of one article.
        :param response:
        :return:
        '''
        article_item = JobboleItem()
        front_image_url = response.meta.get("front_image_url", "")
        title = response.xpath('//div[@class="entry-header"]/h1/text()').extract_first()
        create_date = response.xpath('//p[@class="entry-meta-hide-on-mobile"]/text()').extract()[0].strip().split()[0]
        tag_list = response.xpath('//p[@class="entry-meta-hide-on-mobile"]/a/text()').extract()
        # drop the comment-count entry from the tag list
        tag_list = [element for element in tag_list if -1 == element.find("评论")]
        tag = ",".join(tag_list)
        # the upvote count may be missing, so extract the node list first
        praise_nodes = response.xpath('//span[contains(@class, "vote-post-up")]/h10/text()').extract()
        if len(praise_nodes) == 0:
            praise_nums = 0
        else:
            praise_nums = int(praise_nodes[0])
        print('praise_nums ', praise_nums)
        fav_nums = response.xpath('//span[contains(@class, "bookmark-btn")]/text()').extract()[0]
        # non-greedy .*? so the group captures the whole number, not just its last digit
        match_re = re.match(".*?(\d+).*", fav_nums)
        if match_re:
            fav_nums = int(match_re.group(1))
        else:
            fav_nums = 0
        #print('@@@@ ', response.xpath('//a[@href="#article-comment"]/span/text()').extract())
        comment_nums = response.xpath('//a[@href="#article-comment"]/span/text()').extract()[0]
        match_com = re.match(".*?(\d+).*", comment_nums)
        if match_com:
            comment_nums = int(match_com.group(1))
        else:
            comment_nums = 0
        content = response.xpath('//div[@class="entry"]').extract()[0]
        article_item['url_object_id'] = get_md5(response.url)  # md5 turns the url into a fixed-length id
        article_item['title'] = title
        article_item['url'] = response.url
        try:
            create_date = datetime.datetime.strptime(create_date, '%Y/%m/%d').date()
        except Exception as e:
            # note the module import: datetime.datetime.now(), not datetime.now()
            create_date = datetime.datetime.now().date()
        article_item['create_date'] = str(create_date)
        article_item['front_image_url'] = [front_image_url]
        article_item['praise_nums'] = praise_nums
        article_item['fav_nums'] = fav_nums
        article_item['comment_nums'] = comment_nums
        article_item['tag'] = tag
        #article_item['content'] = content
        yield article_item
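The urljoin calls in parse behave exactly as the comments describe; a quick standalone check (urls illustrative):

from urllib.parse import urljoin

# relative path: joined onto the listing page url
print(urljoin("http://blog.jobbole.com/all-posts/", "/114000/"))
# -> http://blog.jobbole.com/114000/

# absolute url: the base is ignored entirely
print(urljoin("http://blog.jobbole.com/all-posts/", "http://img.example.com/a.jpg"))
# -> http://img.example.com/a.jpg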
settings.py — the parts relevant to this lesson:

ITEM_PIPELINES = {
    'jobBole.pipelines.JobbolePipeline': 300,
    'jobBole.pipelines.ArticleImagePipeline': 301,
    'jobBole.pipelines.MongoDBTwistedPipline': 302,
}
# lower numbers run first: json export, then image download, then MongoDB insert
IMAGES_STORE = 'D:\\SunWork\\python\\jobBole'
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'bole'
MONGODB_SHEETNAME = 'bolePaper'
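Two practical notes, assuming a standard project layout named jobBole (as the import paths suggest): the built-in ImagesPipeline needs Pillow installed (pip install Pillow), and the crawl is started from the project root with:

scrapy crawl bole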
pipelines.py — the three pipelines:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
from scrapy.pipelines.images import ImagesPipeline
import codecs
import json
import pymongo
# scrapy.conf is deprecated in recent Scrapy versions;
# get_project_settings is the documented replacement
from scrapy.utils.project import get_project_settings

settings = get_project_settings()


class JobbolePipeline(object):
    '''
    Dump each item as one json line into a file.
    '''
    def __init__(self):
        self.file = codecs.open("article.json", 'w', encoding='utf-8')

    def process_item(self, item, spider):
        print('@@@@@@@@@@ ', item)
        lines = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(lines)
        return item

    def close_spider(self, spider):
        # scrapy calls close_spider (not spider_closed) on pipelines
        self.file.close()
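Each article then becomes one line in article.json; an illustrative (entirely made-up) record:

{"title": "some article", "create_date": "2019-01-01", "url": "http://blog.jobbole.com/114000/", "url_object_id": "32-char md5", "front_image_url": ["http://img.example.com/a.jpg"], "praise_nums": 3, "fav_nums": 5, "comment_nums": 2, "tag": "IT技术"}

Note that front_image_path is absent: ArticleImagePipeline (priority 301) runs after this pipeline (300), so the path has not been filled in yet when the line is written.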

class ArticleImagePipeline(ImagesPipeline):
    '''
    Handles downloading and saving of the cover images.
    '''
    def get_media_requests(self, item, info):
        for image_url in item['front_image_url']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        for ok, value in results:
            if ok:
                image_file_path = value['path']
                item['front_image_path'] = image_file_path
            else:
                item['front_image_path'] = ""
        return item
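For reference, each entry of the results argument passed to item_completed is a (success, info) tuple; a successful download looks roughly like this (values illustrative):

# (True, {'url': 'http://img.example.com/a.jpg',
#         'path': 'full/0a79c461a4062ac383dc4fade7bc09f1.jpg',
#         'checksum': '...'})
# 'path' is relative to IMAGES_STORE, so it can be stored on the item as-is.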

class MongoDBTwistedPipline(object):
    def __init__(self):
        # host
        host = settings["MONGODB_HOST"]
        # port
        port = settings["MONGODB_PORT"]
        # database name
        dbname = settings["MONGODB_DBNAME"]
        # collection name
        sheetname = settings["MONGODB_SHEETNAME"]
        # connect to the MongoDB server
        client = pymongo.MongoClient(host=host, port=port)
        # select the database
        mydb = client[dbname]
        # select the collection
        self.post = mydb[sheetname]

    def process_item(self, item, spider):
        data = dict(item)
        # insert_one replaces the insert method deprecated in pymongo 3
        self.post.insert_one(data)
        return item
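After a run, a quick way to confirm the inserts from Python (connection values taken from the settings above):

import pymongo

client = pymongo.MongoClient('127.0.0.1', 27017)
collection = client['bole']['bolePaper']
print(collection.count_documents({}))                  # number of stored articles
print(collection.find_one({}, {'title': 1, 'url': 1}))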
Note: the code originates from https://www.cnblogs.com/zhaof/p/7173094.html, with some modifications made on top of it.