Python crawler: scraping images from wallhaven

Posted by 东仔.


Preface: this article, compiled by the editors of 小常识网 (cha138.com), introduces how to scrape images from wallhaven with a Python crawler; hopefully it is of some reference value to you.

import re
import requests

# Request headers: copy your own Cookie value from the browser (F12 -> Network tab); it is not provided here
headers = {
    'Cookie': "get your own cookie from F12 -> Network, not provided here"
}


def toplist():
    a = 0
    one_page_re = '<a class="preview" href="(.*?)"'
    # Walk through the first 29 pages of the "hot" listing
    for page in range(0, 29):
        url = "https://wallhaven.cc/hot?page=" + str(page)
        response = requests.get(url, headers=headers).text
        # Each match is the URL of a wallpaper detail page
        result_page = re.findall(one_page_re, response, re.S)
        for detail_url in result_page:
            re_two_page = '<img id="wallpaper" src="(.*?)" alt='
            response_two = requests.get(detail_url, headers=headers).text
            response_two_result = re.findall(re_two_page, response_two, re.S)
            for pic_url in response_two_result:
                a += 1
                path = "C:/Users/Administrator/Pictures/"
                name = path + str(a) + ".jpg"
                content = requests.get(pic_url, headers=headers).content
                try:
                    with open(name, 'wb') as f:
                        f.write(content)
                    print(">>save==" + str(a) + "==picture success!<<")
                except OSError:
                    print(">>save==" + str(a) + "==picture failed!<<")
                    continue

def random():
    a = 0
    re_one_page = '<a class="preview" href="(.*?)"'
    re_all_page = r'title=".*?">(\d+.?\d*)</a>'
    # Fetch one listing page first so the pager numbers can be parsed out
    url = 'https://wallhaven.cc/random?seed=SuMDKp&page=2'
    response = requests.get(url, headers=headers).text
    all_page_num = re.findall(re_all_page, response, re.S)
    # all_page_num[8] is taken as the total number of pages shown in the pager
    for page in range(0, int(all_page_num[8])):
        url = 'https://wallhaven.cc/random?seed=SuMDKp&page=' + str(page)
        response_pic_text = requests.get(url, headers=headers).text
        pic_url = re.findall(re_one_page, response_pic_text, re.S)
        print('Now page is: ' + str(page))
        if len(pic_url) > 0:
            for detail_url in pic_url:
                re_url_get_url = '<img id="wallpaper" src="(.*?)" alt='
                response_url_get_text = requests.get(detail_url, headers=headers).text
                result_pic_url = re.findall(re_url_get_url, response_url_get_text, re.S)
                for img_url in result_pic_url:
                    a += 1
                    path = "C:/Users/Administrator/Pictures/"
                    name = path + str(a) + ".jpg"
                    content = requests.get(img_url, headers=headers).content
                    try:
                        with open(name, 'wb') as f:
                            f.write(content)
                        print(">>save==" + str(a) + "==picture success!<<")
                    except OSError:
                        print(">>save==" + str(a) + "==picture failed!<<")
                        continue
        else:
            continue

if __name__ == '__main__':
    print("<----------------------------------------->")
    print("Welcome to the wallhaven download picture script")
    print("Download hot pictures: enter >> 1 <<")
    print("Download random pictures: enter >> 2 <<")
    print("Default download path is: C:/Users/Administrator/Pictures/")
    print("<----------------------------------------->")
    a = input("Enter your choice: ")
    if int(a) == 1:
        toplist()
    elif int(a) == 2:
        random()
    else:
        print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
        print("The number you entered is invalid, please try again!")
        print("Thank you for using this software. Please restart it and use it again!")
        print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")

Note: no png/jpg check is done; every file is saved with a .jpg extension!
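If you want to keep the real extension, here is a minimal sketch of how it could be taken from the image URL itself (the helper name pick_name is just for illustration, not part of the original script):

import os
from urllib.parse import urlparse

def pick_name(img_url, index, path="C:/Users/Administrator/Pictures/"):
    # Take the extension (.jpg or .png) from the URL path, falling back to .jpg
    ext = os.path.splitext(urlparse(img_url).path)[1] or ".jpg"
    return path + str(index) + ext

Inside the download loops, name = pick_name(pic_url, a) would then replace the hard-coded path + str(a) + ".jpg".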
