Scraping a site's news sections into Kafka

Posted by 小咖啡


Preface: compiled by the editors of cha138.com, this post walks through a section crawler that scrapes a news site and ships the articles toward Kafka; hopefully it is of some reference value to you.


config.ini
[indianexpress]
max_main_interval=1800
network_timeout=15
proxy=
indianexpress.py

#!/usr/bin/python3
#-*- coding:utf-8 -*-
"""
create 2018-02-27
author zl
desc:
    https://indianexpress.com    # The Indian Express
files:
    indianexpress.py     # the crawler
    config.ini           # configuration file
    max_main_interval    # interval between main-loop iterations
    network_timeout      # request timeout
    proxy                # proxy
dependent:
    configparser, bs4
usage:
    skip

"""
from pb_code import *
from bs4 import BeautifulSoup
import re
import time


def get_list(urls):
    for url in urls['url']:
        try:
            resp = fetch_get(False, url, network_timeout, proxy)
            if resp is not None:
                soup = BeautifulSoup(resp, 'html.parser')
                news_list = soup.find_all('div', class_=urls['list_class'])
                for art in news_list:
                    timedate = art.find('div', class_='date').text
                    #print(timedate)
                    timepattern = re.compile(r'\w+ \d{1,2}, \d{4} \d{1,2}:\d{1,2}')
                    t1 = timepattern.findall(timedate)
                    # the site publishes IST; the +2.5h shift converts the parsed
                    # local-time epoch to the correct one (see the sketch below)
                    timestamp = int(time.mktime(time.strptime(t1[0], "%B %d, %Y %H:%M"))) + 3600 * 2.5
                    if 'pm' in timedate:
                        timestamp = timestamp + 3600 * 12
                    if int(time.time()) - timestamp < 3600:
                        title = art.find('div', class_='title')
                        #print(title)
                        link = title.find('a')
                        art_json = {
                            "publishtime": time.strftime("%Y-%m-%d %X", time.localtime(timestamp)),
                            "author": "",
                            "title": link.text,
                            "content": "",
                            "gathertime": time.strftime("%Y-%m-%d %X", time.localtime()),
                            "inserttime": "",
                            "url": link.attrs['href'],
                            "group_id": "7",
                            "site_id": "",
                            "site_url": "",
                            "language": "EN"
                        }
                        get_page(urls['page_content_id'], art_json)
                    elif timestamp is not None:
                        # the list is newest-first: once an article is older
                        # than an hour, everything after it is too
                        break
        except Exception as e:
            print('get_list {}, {}'.format(e, url))
        time.sleep(random.randint(3, 10))


def get_page(tag_id, art_json):
    resp = fetch_get(False, art_json['url'], network_timeout, proxy)
    #print(resp)
    if resp is not None:
        soup = BeautifulSoup(resp, 'html.parser')
        art_content = soup.find('div', itemprop=tag_id).find_all('p')
        content = map(lambda x: x.text, art_content)
        #print(list(content))
        new_content = ''.join(list(content))
        art_json['content'] = new_content
        art_json['inserttime'] = time.strftime("%Y-%m-%d %X", time.localtime())
        print(art_json)
        time.sleep(random.randint(3, 10))


if __name__ == '__main__':
    site_name = 'indianexpress'
    max_main_interval, network_timeout, proxy = load_config(site_name)
    while True:
        get_list(load_urls(site_name))
        print("{} all done, sleep {} sec.".format(site_name, max_main_interval))
        time.sleep(max_main_interval)
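The least obvious part of get_list is the time handling: indianexpress.com prints dates like "February 27, 2018 4:35 pm" in IST (UTC+5:30), while time.mktime interprets the parsed struct in the crawler host's timezone. The +2.5-hour correction only makes sense if the host runs at UTC+8, which is my inference, not something the post states. Here is a minimal standalone sketch of the same conversion; the sample string is invented for illustration:

# date_demo.py -- standalone sketch of the timestamp logic in get_list.
# Assumes the crawler host runs in UTC+8 (implied by the +2.5h offset);
# the sample string below is illustrative, not taken from the site.
import re
import time

timedate = 'Updated: February 27, 2018 4:35 pm'

timepattern = re.compile(r'\w+ \d{1,2}, \d{4} \d{1,2}:\d{1,2}')
t1 = timepattern.findall(timedate)          # ['February 27, 2018 4:35']

# mktime() treats the parsed struct as local (UTC+8) time, but the site
# publishes IST (UTC+5:30), so the raw epoch is 2.5 hours too small.
timestamp = int(time.mktime(time.strptime(t1[0], "%B %d, %Y %H:%M"))) + 3600 * 2.5
if 'pm' in timedate:                        # the site uses a 12-hour clock
    timestamp += 3600 * 12

print(time.strftime("%Y-%m-%d %X", time.localtime(timestamp)))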
pb_code.py
from configparser import ConfigParser
import json
import random
import requests

USER_AGENTS = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
def load_config(segment):
    '''Load parameters from the config file'''
    config = ConfigParser()
    config.read('config.ini', encoding='utf-8-sig')
    p1 = int(config.get(segment, 'max_main_interval'))
    p2 = int(config.get(segment, 'network_timeout'))
    p3 = config.get(segment, 'proxy')
    if p3 == '':
        p3 = None
    return p1, p2, p3
def load_urls(fname):
    '''Load the URLs to crawl from <fname>.json'''
    with open(fname + '.json', encoding='utf-8') as f:
        r = json.load(f)
        return r
def fetch_get(is_json, url, timeout, proxy):
    '''Send a GET request and return the parsed response'''
    head = {
        'User-Agent': random.choice(USER_AGENTS)
    }
    req = requests.get(url, headers=head, timeout=timeout,
                       proxies={'http': proxy} if proxy else None)
    if is_json:
        x = json.loads(req.text)
    else:
        x = req.text
    return x
def save_db(jdata):
    '''Save the JSON to Kafka via a relay HTTP interface'''
    print("site_name:{} url:{}".format(jdata['site_name'], jdata['url']))
    # endpoint URL of the relay interface (left blank here);
    # close the connection after each request
    requests.post('', json=jdata, headers={"Content-Type": "application/json", "Connection": "close"})
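As its docstring notes, save_db only reaches Kafka indirectly, through a relay HTTP interface whose URL is left blank in the post. With direct access to the brokers, a producer along the following lines would do the same job; the kafka-python dependency, the broker address, and the topic name are all my assumptions, not part of the original code:

# kafka_sink.py -- hedged sketch of writing art_json straight to Kafka
# with kafka-python (pip install kafka-python). Broker address and topic
# name are placeholders; the original post goes through an HTTP relay.
import json
from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers='localhost:9092',                      # placeholder broker
    value_serializer=lambda v: json.dumps(v).encode('utf-8') # dict -> JSON bytes
)

def save_db(jdata):
    '''Send one article's JSON to the news topic.'''
    producer.send('indianexpress_news', jdata)               # placeholder topic
    producer.flush()                                         # block until delivered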
indianexpress.json
{
  "list_class": "articles",
  "page_content_id": "articleBody",
  "url": [
     "http://indianexpress.com/latest-news/"
  ]
}
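The two selector keys map straight onto the site's markup: get_list matches the <div class="articles"> blocks on the listing page, and get_page collects every <p> inside <div itemprop="articleBody"> on the article page, so retargeting the crawler at another section is mostly a matter of editing this file. A quick interactive check of what load_urls returns (assuming the JSON file sits next to pb_code.py):

>>> from pb_code import load_urls
>>> cfg = load_urls('indianexpress')
>>> cfg['list_class'], cfg['page_content_id']
('articles', 'articleBody')
>>> cfg['url']
['http://indianexpress.com/latest-news/']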