使用Python同步&异步爬取某视频
Posted 莜莜@爬小虫联盟
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了使用Python同步&异步爬取某视频相关的知识,希望对你有一定的参考价值。
某视频是什么想必大家多少都有所了解,最开始接触某视频时,我曾被很多视频感动到,甚至现在,也是同样,正如我一直强调的,“所有的技巧,都抵不上一个真实故事的分量”。那触动人心的感觉,确实让人们在深切体悟着这个世界!了解这个世界!
而人生来,也不过就是感知世界,适应世界,改变世界。从这一点上,我倒挺喜欢某视频的。因为它扩宽了我们的视野,让我们知道了世界上原来还有这样的事情存在。此外,某视频还能时刻提醒我们,世间百态,世间苦难,让我们觉得自己其实是幸福的也挺好。
综上简单的阐述了下,引入我们今天想要爬取的目标,即爬取某视频上面的视频,分享内容主要分为同步爬取和异步爬取,并且对爬取时间做了对比,因为代码的难度也不是很大,所以不做过多赘述。异步使用的是asyncio,在前些篇也有做过相关的练习: ~异步爬取某者荣耀~ ,~为什么使用异步来写爬虫~,有兴趣的可以再去看看,做做练习。
同步代码如下:
# coding:utf-8
# __auth__ = "maiz"
import os
import re
import random
import requests
from datetime import datetime
from lxml import etree
class Sync(object):
    """Synchronously scrape a pearvideo.com category page and download every
    listed video into ``download_folder``."""

    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }
    # Relative output directory for the downloaded .mp4 files.
    download_folder = "./videos"

    def run(self):
        """Entry point: ensure the output folder exists, fetch the category
        listing, then download each video sequentially.

        Raises:
            requests.RequestException: if the category page is not HTTP 200.
        """
        url = 'https://www.pearvideo.com/category_5'
        if os.path.exists(self.download_folder):  # check whether the folder exists
            print("文件夹已存在")
        else:
            os.mkdir(self.download_folder)  # create it when missing
            print("文件夹已创建")
        resp = requests.get(url, headers=self.headers)
        if resp.status_code != 200:
            # Fail fast with a message instead of raising the bare class
            # (which also left `lis` unbound in the original structure).
            raise requests.RequestException(f"bad status: {resp.status_code}")
        tree = etree.HTML(resp.text)
        lis = tree.xpath('//ul[@id="categoryList"]/li')
        for li in lis:
            filename, download_url = self.parse_video_url(li)
            print(f"==> 开始下载 {filename}")
            self.download(filename, download_url)

    def parse_video_url(self, li) -> tuple:
        """Resolve one listing ``<li>`` element to ``(filename, direct_url)``.

        The real mp4 URL comes from the videoStatus.jsp AJAX endpoint; the
        13-digit timestamp embedded in its ``srcUrl`` must be replaced with
        ``cont-<id>`` before the URL is actually downloadable.
        """
        title = li.xpath('./div/a/div[2]/text()')[0].strip('“”!?').replace("| ", "").replace(" | ", "")
        page = str(li.xpath('./div/a/@href')[0]).split('_')[1]
        ajax_url = 'https://www.pearvideo.com/videoStatus.jsp?'
        params = {'contId': page, 'mrd': random.random()}
        # The AJAX endpoint rejects requests without a matching Referer.
        headers = self.headers.copy()
        headers.update({'Referer': 'https://www.pearvideo.com/video_' + page})
        resp = requests.get(ajax_url, headers=headers, params=params)
        ajax_text = resp.json()
        download_url = ajax_text["videoInfo"]['videos']["srcUrl"]
        download_url = re.sub(r"\d{13}", f"cont-{page}", download_url)
        return title + ".mp4", download_url

    def download(self, filename: str, url: str):
        """Download one video into ``download_folder``.

        Raises:
            requests.RequestException: if the video URL is not HTTP 200.
        """
        resp = requests.get(url, headers=self.headers)
        if resp.status_code != 200:
            raise requests.RequestException(f"bad status: {resp.status_code}")
        with open(os.path.join(self.download_folder, filename), "wb") as fb:
            fb.write(resp.content)
        print(f"已下载:{filename}")
        print("-" * 60)
if __name__ == '__main__':
    # Time the full synchronous crawl for comparison with the async version.
    started = datetime.now()
    crawler = Sync()
    crawler.run()
    finished = datetime.now()
    print((finished - started).total_seconds(), "秒")
小编最近发现一个免费领取代理ip的平台 有需要的可以点击领取下PC端:http://i0k.cn/4KzbY 移动端http://i0k.cn/53dbO
异步代码如下:
# coding:utf-8
# __auth__ = "maiz"
import os
import re
import random
import asyncio
import aiofiles
import aiohttp
from datetime import datetime
from lxml import etree
class Spider(object):
    """Asynchronously scrape a pearvideo.com category page and download all
    listed videos concurrently with aiohttp/aiofiles."""

    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }
    # Relative output directory for the downloaded .mp4 files.
    download_folder = "./videos"

    def __init__(self):
        # (filename, download_url) pairs filled by _parse_video_url.
        # Kept per-instance: the original class-level list was shared by every
        # Spider instance and kept growing across runs.
        self.urls = []

    async def main(self):
        """Collect all video URLs, then download them concurrently."""
        await self._get_video_urls()
        downloads = [
            asyncio.create_task(self._download_video(filename, url))
            for filename, url in self.urls
        ]
        await asyncio.gather(*downloads)

    async def _get_video_urls(self):
        """Fetch the category listing and resolve every entry concurrently."""
        url = 'https://www.pearvideo.com/category_5'
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with session.get(url) as response:
                # raise_for_status() raises a fully-constructed
                # aiohttp.ClientResponseError; the original
                # `raise aiohttp.ClientResponseError` would itself fail with a
                # TypeError because that class needs constructor arguments.
                response.raise_for_status()
                text = await response.text()
        tree = etree.HTML(text)
        lis = tree.xpath('//ul[@id="categoryList"]/li')
        # asyncio.wait() no longer accepts bare coroutines (removed in 3.11);
        # gather() schedules them directly.
        await asyncio.gather(*(self._parse_video_url(li) for li in lis))

    async def _parse_video_url(self, li):
        """Resolve one listing ``<li>`` element and append
        ``(filename, direct_url)`` to ``self.urls``.

        The 13-digit timestamp in the AJAX ``srcUrl`` must be swapped for
        ``cont-<id>`` to obtain a downloadable URL.
        """
        title = li.xpath('./div/a/div[2]/text()')[0].strip('“”!?').replace("| ", "").replace(" | ", "")
        page = str(li.xpath('./div/a/@href')[0]).split('_')[1]
        ajax_url = 'https://www.pearvideo.com/videoStatus.jsp?'
        params = {'contId': page, 'mrd': random.random()}
        # The AJAX endpoint rejects requests without a matching Referer.
        headers = self.headers.copy()
        headers.update({'Referer': 'https://www.pearvideo.com/video_' + page})
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(ajax_url, params=params) as response:
                ajax_text = await response.json()
        download_url = ajax_text["videoInfo"]['videos']["srcUrl"]
        download_url = re.sub(r"\d{13}", f"cont-{page}", download_url)
        self.urls.append((title + ".mp4", download_url))

    async def _download_video(self, filename: str, url: str):
        """Download one video into ``download_folder`` without blocking the
        event loop on file writes."""
        async with aiohttp.ClientSession(headers=self.headers) as session:
            print(f"开始下载 => {filename}")
            # Session already carries self.headers; no per-request override.
            async with session.get(url) as response:
                content = await response.read()
        async with aiofiles.open(os.path.join(self.download_folder, filename), "wb") as fb:
            await fb.write(content)
        # filename already ends in ".mp4" — don't append the suffix twice.
        print(f"已下载 => {filename}")

    def run(self):
        """Entry point: ensure the output folder exists, then drive main()."""
        if os.path.exists(self.download_folder):  # check whether the folder exists
            print("文件夹已存在")
        else:
            os.mkdir(self.download_folder)  # create it when missing
            print("文件夹已创建")
        # asyncio.run() replaces the deprecated
        # get_event_loop()/run_until_complete() pattern.
        asyncio.run(self.main())
if __name__ == '__main__':
    # Time the full asynchronous crawl for comparison with the sync version.
    t0 = datetime.now()
    crawler = Spider()
    crawler.run()
    t1 = datetime.now()
    print("=" * 40)
    print((t1 - t0).total_seconds(), "秒")
右击运行代码,即可在当前文件夹下,生成一个videos文件夹,并下载相关的视频文件。快去看看有哪些你感兴趣的内容吧,代码获取后台回复:“某视频下载”。
以上就是今天给大家分享的内容
以上是关于使用Python同步&异步爬取某视频的主要内容,如果未能解决你的问题,请参考以下文章