Multi-threaded, Fault-tolerant Scraping of Toutiao Street-snap Images
- Analyze Toutiao's Ajax search API and use regular expressions to crawl Toutiao's street-snap images with Python 3 in a parallel, fault-tolerant way; the records are saved to MongoDB and the images are downloaded. The script below parallelizes with a multiprocessing pool; a thread-based variant is sketched after the script.
- Toutiao's content pages have been redesigned: the image gallery pages are now delivered not only as Ajax-rendered pages but also as plain HTML pages.
- The script therefore uses two regular expressions and applies the appropriate one depending on the page type.
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:Aiker
@file:toutiao.py
@time: 9:35 PM
"""
import json
import os
import re
from json import JSONDecodeError
from multiprocessing import Pool
from urllib.parse import urlencode
from hashlib import md5
import pymongo
import requests
from requests.exceptions import RequestException
MONGO_URL = 'localhost:27017'
MONGO_DB = 'toutiao'
MONGO_TABLE = 'toutiao'
GROUP_START = 1
GROUP_END = 20
KEYWORD = '街拍'
# connect=False defers the connection until the first operation
# (the client is created before the worker processes are forked)
client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}
def get_url(url):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Request failed', url)
        return None
def get_page_index(offset, keyword):
    data = {
        'aid': '24',
        'app_name': 'web_search',
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': '20',
        'en_qc': '1',
        'cur_tab': '1',
        'from': 'search_tab',
        'pd': 'synthesis',
        'timestamp': '1124216535987'
    }
    url = 'https://www.toutiao.com/api/search/content/?' + urlencode(data)  # encode the parameter dict into a query string
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Failed to request the index page')
        return None
def parse_page_index(html):
    try:
        data = json.loads(html)  # parse the JSON response
        if data and 'data' in data.keys():
            # print(data.keys())  # debug: list all keys
            for item in data.get('data'):
                if 'article_url' in item:  # make sure the key exists, to avoid None
                    # print(item)
                    yield item.get('article_url')  # generator of detail-page URLs
    except JSONDecodeError:
        pass
    except TypeError:
        pass
def get_page_detail(url):
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Failed to request the detail page', url)
        return None
def parse_page_detail(html, url):
    # Case 1: HTML detail pages embed the article in an articleInfo block
    pattern = re.compile(r"articleInfo:.*?title:\s'(.*?)',.*?content:\s'(.*?)'.*?groupId", re.S)
    result = re.findall(pattern, html)
    if result:
        title, content = result[0]
        # image URLs inside the content string run up to a closing double quote
        pattern = re.compile(r'(http://.*?)"', re.S)
        images = re.findall(pattern, content)
        for image in images:
            download_image(image, title)
        return {
            'title': title,
            'url': url,
            'images': images
        }
    else:
        # Case 2: Ajax gallery pages ship the data as JSON in BASE_DATA.galleryInfo
        pattern = re.compile(r"BASE_DATA\.galleryInfo.*?title:\s'(.*?)'.*?gallery: JSON\.parse\(\"(.*)\"\)", re.S)
        result = re.findall(pattern, html)
        if result:
            title, content = result[0]
            data = json.loads(content.replace('\\', ''))  # drop the escape backslashes before parsing
            if data and 'sub_images' in data.keys():
                sub_images = data.get('sub_images')
                images = [item.get('url') for item in sub_images]
                for image in images:
                    download_image(image, title)
                return {
                    'title': title,
                    'url': url,
                    'images': images
                }
def save_to_mongo(result):
    # insert_one() replaces the insert() method removed in newer pymongo versions
    if db[MONGO_TABLE].insert_one(result):
        print('Saved to MongoDB', result)
        return True
    return False
def download_image(url, title):
    print('Downloading', url)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            save_image(response.content, title)
        return None
    except RequestException:
        print('Failed to request the image', url)
        return None
def save_image(content, title):
    try:
        if title:
            # strip characters that are illegal in directory names
            title = re.sub('[:?!!:?]', '', title)
        dir = 'z:\\toutiao\\'  # note the doubled backslashes in the Windows path
        if not os.path.exists(dir + title):
            os.mkdir(dir + title)
        # the MD5 of the image content doubles as a de-duplicating file name
        file_path = '{0}/{1}.{2}'.format(dir + title, md5(content).hexdigest(), 'jpg')
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(content)
    except OSError:
        pass
def main(offset):
    html = get_page_index(offset, KEYWORD)
    for url in parse_page_index(html):
        print(url)
        html = get_page_detail(url)
        if html:
            result = parse_page_detail(html, url)
            if result:
                save_to_mongo(result)
        # print(html)
if __name__ == '__main__':
    # main()
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]  # offsets 20, 40, ..., 400
    pool = Pool()
    pool.map(main, groups)
    pool.close()
    pool.join()
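- The title promises multithreading, but the script above actually fans the offsets out to a multiprocessing.Pool of worker processes. A thread-based alternative is a small change; the snippet below is only a sketch that reuses main(), GROUP_START and GROUP_END from the script above, and the max_workers value is an assumption rather than something from the original code.
from concurrent.futures import ThreadPoolExecutor

# Sketch: run the same main(offset) workers in threads instead of processes.
# max_workers=8 is an arbitrary choice, not taken from the original script.
if __name__ == '__main__':
    offsets = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    with ThreadPoolExecutor(max_workers=8) as executor:
        executor.map(main, offsets)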
- The images are downloaded to disk and the records are saved to MongoDB; a quick way to verify what was stored is sketched below.
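- To spot-check what ended up in the database, a short query against the same MongoDB instance can list a few stored records. This is only a sketch: it assumes the connection settings configured above (localhost:27017, with database and collection both named toutiao) and a pymongo version recent enough to provide count_documents().
import pymongo

# Sketch: connect with the same settings as the spider and print a few documents.
client = pymongo.MongoClient('localhost:27017')
collection = client['toutiao']['toutiao']
print('documents saved:', collection.count_documents({}))
for doc in collection.find().limit(3):
    print(doc.get('title'), '-', len(doc.get('images', [])), 'images')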