Collecting JSON logs with filebeat

Posted by 我是一只小小茑


1. Configure the Tomcat log in JSON format

[root@centos2 conf]# vim /usr/local/tomcat/conf/server.xml


# Find line 139 and delete the default pattern="%h %l %u %t &quot;%r&quot; %s %b" />

Add the following instead (quotation marks inside the attribute must be written as &quot; in server.xml):

pattern="{&quot;clientip&quot;:&quot;%h&quot;, &quot;client user&quot;:&quot;%l&quot;, &quot;authenticated&quot;:&quot;%u&quot;, &quot;AccessTime&quot;:&quot;%t&quot;, &quot;method&quot;:&quot;%r&quot;, &quot;status&quot;:&quot;%s&quot;, &quot;SendBytes&quot;:&quot;%b&quot;, &quot;Query?string&quot;:&quot;%q&quot;, &quot;partner&quot;:&quot;%{Referer}i&quot;, &quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}"/>
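For reference, the complete Valve element around line 139 would then look roughly like the sketch below. Only the pattern attribute comes from this article; className, directory and suffix are the stock Tomcat defaults, and the prefix is assumed to be access_log so the file name matches the access_log* path that filebeat reads later.

<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
       prefix="access_log" suffix=".txt"
       pattern="{&quot;clientip&quot;:&quot;%h&quot;, &quot;client user&quot;:&quot;%l&quot;, &quot;authenticated&quot;:&quot;%u&quot;, &quot;AccessTime&quot;:&quot;%t&quot;, &quot;method&quot;:&quot;%r&quot;, &quot;status&quot;:&quot;%s&quot;, &quot;SendBytes&quot;:&quot;%b&quot;, &quot;Query?string&quot;:&quot;%q&quot;, &quot;partner&quot;:&quot;%{Referer}i&quot;, &quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}" />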



2. Configure the Nginx log in JSON format

[root@centos2 nginx]# head -n 50 nginx.conf    # only the first 50 lines; the JSON-related parts are the log_format and access_log directives

user root;
worker_processes  1;

events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;

    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    # write the access log in JSON format
    log_format json '{"@timestamp": "$time_local",'
                    '"remote_addr": "$remote_addr",'
                    '"referer": "$http_referer",'
                    '"request": "$request",'
                    '"status": $status,'
                    '"bytes": $body_bytes_sent,'
                    '"agent": "$http_user_agent",'
                    '"x_forwarded": "$http_x_forwarded_for",'
                    '"up_addr": "$upstream_addr",'
                    '"up_host": "$upstream_http_host",'
                    '"up_resp_time": "$upstream_response_time",'
                    '"request_time": "$request_time"}';
    access_log  /var/log/nginx/access.log  json;

    client_max_body_size 100m;
    sendfile        on;
    keepalive_timeout  65;

    upstream assemble {
        ip_hash;
        server 192.168.2.12:8040 fail_timeout=30s;
    }

    upstream websocket {
        server 192.168.2.12:8040;
    }

    upstream websocketMQ {
        server 192.168.2.12:3872;
    }

    upstream nodejs {
        ip_hash;
        # ... output truncated by head -n 50
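After saving the change, it is worth validating the configuration, reloading Nginx, and looking at one fresh line of the access log to confirm that each request is written as a single JSON object. A minimal check, assuming Nginx runs directly on this host (if it runs inside a container, run the same commands inside it):

[root@centos2 nginx]# nginx -t && nginx -s reload
[root@centos2 nginx]# tail -n 1 /var/log/nginx/access.log

One self-contained JSON object per line is what makes this log easy to parse downstream.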





3. Modify the filebeat configuration file



#=========================== Filebeat inputs =============================

filebeat.inputs:

- type: log

  enabled: true

  paths:

    - /usr/local/tomcat/logs/access_log*

  json.keys_under_root: true

  json.overwrite_keys: true

  tags: ["tomcat"]


#----------------------------- Logstash output --------------------------------

output.logstash:

  # The Logstash hosts

  hosts: ["192.168.2.222:5044"]

  indices:

    - index: "tomcat-access-%{[beat.version]}-%{+yyyy.MM}"

      when.contains:

        tags: "tomcat"
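A note on the options above: json.keys_under_root and json.overwrite_keys make filebeat decode each Tomcat access-log line as JSON and place the decoded keys (clientip, status, AccessTime, ...) at the top level of the event, overwriting any conflicting keys filebeat adds itself. Also be aware that conditional indices routing with when.contains is a feature of the elasticsearch output; with output.logstash the index is normally chosen on the Logstash side, as the pipeline further down does. Before restarting, the configuration and the connection to Logstash can be checked with filebeat's built-in test commands (a quick sanity check, assuming the filebeat binary is on the PATH):

[root@db01 ~]# filebeat test config
[root@db01 ~]# filebeat test output

The first command prints Config OK when the YAML is valid; the second reports whether 192.168.2.222:5044 can be reached.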




4. Restart filebeat

[root@db01 ~]# systemctl restart filebeat




Collecting multiple logs:

[root@centos2 filebeat]# vim /etc/filebeat/filebeat.yml



#=========================== Filebeat inputs =============================


filebeat.inputs:

- type: log

  tail_files: true

  scan_frequency: 5s

  backoff: 1s

  max_backoff: 10s

  paths:

    - /usr/local/tomcat/logs/catalina.out

    - /usr/local/tomcat/logs/access_log*

  fields:

    type: tomcat

    ip: 192.168.2.231

  fields_under_root: true


- type: log

  tail_files: true

  scan_frequency: 5s

  backoff: 1s

  max_backoff: 10s

  paths:

    - /home/docker/nginx/log/access.log

  fields:

    type: nginx

    ip: 192.168.2.231

  fields_under_root: true


#----------------------------- Logstash output --------------------------------

output.logstash:

  # The Logstash hosts

  enabled: true

  hosts: ["192.168.2.222:5044"]
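Because fields_under_root: true is set, the custom type and ip fields land at the top level of every event instead of under fields, which is exactly what the [type] conditionals in the Logstash pipeline below match on. Schematically, an event arriving at Logstash looks roughly like this (every value except type and ip is just a placeholder):

{"@timestamp": "...", "message": "<one raw log line>", "type": "nginx", "ip": "192.168.2.231", ...}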




[root@master conf.d]# cat /etc/logstash/conf.d/nginx.conf

input {
    beats {
        host => "0.0.0.0"
        port => 5044
    }
}

filter {
    if [type] == "access" {
        grok {
            match => {
                "message" => '(?<clientip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}) - (?<user>\S+) \[(?<timestamp>[^ ]+ \+[0-9]+)\] "(?<requesttype>[A-Z]+) (?<requesturl>[^ ]+) HTTP/\d.\d" (?<status>\d+) (?<bodysize>\d+) "(?<url>\S+)" "[^"]+"'
            }
            # remove fields we do not need
            remove_field => ["message", "@version", "path"]
        }
        date {
            # "timestamp" is the field captured by the grok pattern above
            match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"]
            target => "@timestamp"
        }
    }
}

output {
    if [type] == "nginx" {
        elasticsearch {
            hosts => ["http://192.168.2.222:9200"]
            index => "nginx_log-%{+YYYY.MM.dd}"
        }
    }
    else if [type] == "tomcat" {
        elasticsearch {
            hosts => ["http://192.168.2.222:9200"]
            index => "tomcat_log-%{+YYYY.MM.dd}"
        }
    }
    else if [type] == "access" {
        elasticsearch {
            hosts => ["http://192.168.2.222:9200"]
            index => "access-%{+YYYY.MM.dd}"
        }
    }
    stdout {
        codec => rubydebug
    }
}






Restart Logstash first, check that its port is actually listening, and only then restart filebeat.
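A minimal sketch of that order, assuming an RPM install with the Logstash binary under /usr/share/logstash/bin and systemd units on both hosts:

[root@master conf.d]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginx.conf --config.test_and_exit    # syntax check only
[root@master conf.d]# systemctl restart logstash
[root@master conf.d]# ss -lntp | grep 5044    # wait until the beats input is listening
[root@centos2 ~]# systemctl restart filebeat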
