Python crawler: scraping Renrenche (used-car) listings and generating charts with pandas/matplotlib

Posted by 都是他舅



This program scrapes used-car listings from Renrenche, capturing the car series, model, purchase year, asking price, mileage, down payment, and so on. Without further ado, here is the code.

The car-series spider (.py file):

# -*- coding: utf-8 -*-
# Written against the Scrapy/PyMongo APIs of the time (HtmlXPathSelector with
# .select()/.extract(), collection.insert()); on current versions the
# equivalents are response.xpath()/.getall() and insert_one().
import scrapy
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
import pymongo
import redis

client = pymongo.MongoClient(host="127.0.0.1")
db = client.renrenche
collection = db.Carclass          # collection of car classes (brands and series)

r = redis.Redis(host='127.0.0.1', port=6379, db=0)   # URL queue shared with the second spider


class renrencheSpider(scrapy.Spider):
    name = "Carinfo1"
    allowed_domains = ["renrenche.com"]   # domains the spider is allowed to visit
    start_urls = [
        "https://www.renrenche.com/bj/ershouche/"
    ]

    # parse() is called back for every page that finishes downloading
    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        hx = hxs.select('//div[@class="brand-more-content"]/div[@class="brand-section brand-section-1"]/p[@class="bl"]/span[@class="bn"]/a')
        for secItem in hx:
            url = secItem.select("@href").extract()
            c = "https://www.renrenche.com" + url[0]
            name = secItem.select("text()").extract()
            classid = self.insertMongo(name, None)
            print(c)
            print(name)
            # bind this brand's MongoDB id to the follow-up request via a default argument
            request = Request(c, callback=lambda response, pid=str(classid): self.parse_subclass(response, pid))
            yield request

    def parse_subclass(self, response, pid):
        hxs = HtmlXPathSelector(response)
        hx = hxs.select('//ul[@id="filter_series"]/li[@class=""]/a')
        for secItem in hx:
            urls = secItem.select("@href").extract()
            url = "https://www.renrenche.com" + urls[0]
            name = secItem.select("text()").extract()
            print(url)
            print(name)
            classid = self.insertMongo(name, pid)
            self.pushRedis(classid, url, pid)

    def insertMongo(self, classname, pid):
        classid = collection.insert({'classname': classname, 'pid': pid})
        return classid

    def pushRedis(self, classid, url, pid):
        carurl = '%s,%s,%s' % (classid, url, pid)
        r.lpush('carurl', carurl)
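
A quick way to sanity-check the hand-off between the two spiders — a minimal sketch (not part of the original code), assuming Carinfo1 has already been run against the same local Redis:

import redis

r = redis.Redis(host='127.0.0.1', port=6379, db=0)
# pushRedis() above queued each entry as "classid,url,pid"
for item in r.lrange('carurl', 0, 4):   # peek at the first five entries
    classid, url, pid = bytes.decode(item).split(',')
    print(classid, url, pid)

Run "scrapy crawl Carinfo1" first to fill the queue; the second spider below drains the same list in its __init__.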

The listing-details spider (.py file):

# -*- coding: utf-8 -*-
# Uses the same era-specific Scrapy/PyMongo APIs as the first spider.
import scrapy
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
import pymongo
import redis

client = pymongo.MongoClient(host="127.0.0.1")
db = client.renrenche
collection = db.Carinfo

r = redis.Redis(host='127.0.0.1', port=6379, db=0)


class renrencheSpider(scrapy.Spider):
    name = "Carinfo2"
    allowed_domains = ["renrenche.com"]
    dict = {}          # maps each start URL to its pid and a page counter
    start_urls = []

    def __init__(self):
        # drain the "classid,url,pid" queue filled by the first spider
        a = r.lrange('carurl', 0, -1)
        for item in a:
            novelurl = bytes.decode(item)
            arr = novelurl.split(',')          # [classid, url, pid]
            renrencheSpider.start_urls.append(arr[1])
            pid = arr[0]
            url = arr[1]
            self.dict[url] = {"pid": pid, "num": 0}

    def parse(self, response):
        classInfo = self.dict[response.url]
        pid = classInfo['pid']
        num = classInfo['num']
        if num > 3:        # crawl at most a few pages per series
            return None
        hxs = HtmlXPathSelector(response)
        hx = hxs.select('//ul[@class="row-fluid list-row js-car-list"]')
        s = ""
        # the leading ".//" keeps each selection relative to the current <ul>;
        # a bare "//" would re-select over the whole document
        for secItem in hx:
            hx1 = secItem.select('.//li[@class="span6 list-item car-item"]/a[@rrc-event-param="search"]/h3')
            name = hx1.select("text()").extract()
            a = "Model: " + name[0]
            s += a + "\n"
        for secItem in hx:
            hx2 = secItem.select('.//div[@class="mileage"]/span[@class="basic"]')
            name = hx2.select("text()").extract()
            b = "Purchase year/mileage: " + name[0] + "/" + name[1]
            s += b + "\n"
        for secItem in hx:
            hx3 = secItem.select('.//div[@class="tags-box"]/div[@class="price"]')
            name = hx3.select("text()").extract()
            c = str(name[0]).strip()
            c = "Price: " + c + " 万"          # 万 = 10,000 RMB
            s += c + "\n"
        for secItem in hx:
            hx4 = secItem.select('.//div[@class="down-payment"]/div[@class="m-l"]')
            name = hx4.select("text()").extract()
            d = "Down payment: " + name[0] + " 万"
            s += d + "\n"
        print(s)
        classid = self.insertMongo(s, pid)

    def insertMongo(self, classname, pid):
        classid = collection.insert({'classname': classname, 'pid': pid})
        return classid
        # self.pushRedis(classid, pid)

    # Pagination (commented out): follow the "next page" link and re-queue
    # parse() until num exceeds the cap.
    #
    #     print('----------- recursion --------------')
    #     hxs = HtmlXPathSelector(response)
    #     hx = hxs.select('//li[@class="lbf-pagination-item"]/a[@class="lbf-pagination-next "]')
    #     urls = hx.select("@href").extract()
    #     d = "https:" + urls[0]
    #     classInfo['num'] += 1
    #     self.dict[d] = classInfo
    #     print(d)
    #     request = Request(d, callback=self.parse)
    #     yield request
    #     print('--------end--------------')
    #
    # def pushRedis(self, classid, c, pid):
    #     novelnameurl = '%s,%s,%s' % (classid, c, pid)
    #     r.lpush('novelnameurl', novelnameurl)
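
The title promises pandas/matplotlib charts; here is a minimal sketch of that last step, assuming the two spiders above have been run and reusing the "Price: … 万" format built in parse(). The regex, bin count, and output filename are my own choices, not from the original program:

import re
import pymongo
import pandas as pd
import matplotlib.pyplot as plt

client = pymongo.MongoClient(host="127.0.0.1")
collection = client.renrenche.Carinfo

prices = []
for doc in collection.find():
    # each document stores one newline-separated text blob per listing page;
    # recover the numeric part of every "Price: xx.x 万" line
    for m in re.finditer(r"Price: ([\d.]+)", doc["classname"]):
        prices.append(float(m.group(1)))

ax = pd.Series(prices).plot.hist(bins=30, title="Renrenche asking prices")
ax.set_xlabel("price in 万 (10,000 RMB)")
plt.tight_layout()
plt.savefig("prices.png")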

  
