ELKStack实时分析Haproxy访问日志配置

Posted

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了ELKStack实时分析Haproxy访问日志配置相关的知识,希望对你有一定的参考价值。

1.Haproxy配置日志规则

在/etc/haproxy/haproxy.conf的frontend下增加

# Emit one HTTP-format log line per request instead of the terse TCP format.
option httplog
# Log as soon as response headers are sent, without waiting for the transfer
# to finish; byte counts are then prefixed with "+", which the grok pattern
# in the Logstash filter below explicitly expects (\+%{NOTSPACE:bytes_read}).
option logasap
# Forward log lines via syslog to the remote collector on facility local5.
# NOTE(review): LogServerIP is a placeholder — replace with the real address
# of the rsyslog server configured in step 2.
log LogServerIP local5

# Capture selected request headers so they are appended to each log line as
# the {Host|X-Forwarded-For|Referer|User-Agent} group parsed by grok.
capture request header Host len 40
capture request header X-Forwarded-For len 50
#capture request header Accept-Language len 50
capture request header Referer len 200
capture request header User-Agent len 200

2.syslog配置开启远程接收

在日志服务器的 /etc/rsyslog.conf 中开启 UDP 514 端口接收, 并将 local5 设施(与第 1 步 HAProxy 配置中的 log 指令对应)写入独立文件, 供第 3 步的 Logstash 读取:

$ModLoad imudp
$UDPServerRun 514
local5.*    /var/log/haproxy.log

修改后重启 rsyslog 服务使配置生效。

3.Logstash配置

indexer

input {
        # Tail the HAProxy log file written locally by rsyslog (step 2).
        file {
                path => "/var/log/haproxy.log"
                # Read the file from the top on first start so existing lines
                # are indexed. String values must be quoted in the Logstash
                # config language; the original bareword `beginning` is a
                # configuration parse error.
                start_position => "beginning"
                # Persist the sincedb offset after every event so no position
                # is lost on a crash (trades extra disk writes for safety).
                sincedb_write_interval => 0
                type => "HAPROXY_LOG"
                # HAProxy/syslog output is not guaranteed to be valid UTF-8;
                # ISO-8859-1 accepts any byte sequence without decode errors.
                codec => plain {
                        charset => "ISO-8859-1"
                }
        }
}



output {
        # Uncomment to dump each event to the console while debugging:
        #stdout { codec => rubydebug}
        # Buffer events in a Redis list so the shipper instance can consume
        # and parse them asynchronously from the indexer.
        redis {
                host => "192.168.1.2"
                port => 6379
                data_type => "list"
                key => "logstash:Haproxy_log"
        }
}

shipping

input {
    # Drain the Redis list that the indexer pipeline fills.
    redis {
        host => "192.168.1.2"
        port => 6379
        data_type => "list"
        key => "logstash:Haproxy_log"
        # Run five consumer threads so parsing keeps up with the queue.
        threads => 5
        type => "HAPROXY_LOG"
    }
}

filter {
       # Split the syslog-wrapped HAProxy httplog line into named fields
       # (client address, timings, status, captured headers, request line).
       # The pattern is kept exactly as published; it matches the frontend
       # configuration above (option httplog + logasap + captured headers).
       grok{
          match => ["message" , "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{MONTHDAY:haproxy_monthday}/%{MONTH:haproxy_month}/%{YEAR:haproxy_year}:(?!<[0-9])%{HOUR:haproxy_hour}:%{MINUTE:haproxy_minute}(?::%{SECOND:haproxy_second})(?![0-9]).%{INT:haproxy_milliseconds}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/\+%{NOTSPACE:time_duration} %{INT:http_status_code} \+%{NOTSPACE:bytes_read} %{DATA:captured_request_cookie} %{DATA:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} (\{%{IPORHOST:Host}\|?(%{IP:X_Forward_For})?\|?(%{URI:Referer})?\|%{GREEDYDATA:User_Agent}\})?( )( )?\"(<BADREQ>|(%{WORD:http_method} (%{URIPROTO:http_proto}://)?(?:%{USER:http_user}(?::[^@]*)?@)?(?:%{URIHOST:http_host})?(?:%{URIPATHPARAM:http_request})?( HTTP/%{NUMBER:http_version})?))?\""]
      }
        # Expand the captured User-Agent header into browser/OS fields.
        useragent {
                source => "User_Agent"
                target => "ua"
        }
        # Prefer the X-Forwarded-For address for geo lookup when the request
        # came through a proxy; fall back to the TCP peer address otherwise.
        # `source` takes a plain string, not an array.
        if [X_Forward_For] =~ "." {
           geoip {
                source => "X_Forward_For"
                # NOTE(review): database kept under the logstash bin dir as in
                # the original setup — confirm the path on your installation.
                database => "/usr/local/logstash2.2.2/bin/GeoLiteCity.dat"
          }
        } else {
           geoip {
                source => "client_ip"
                database => "/usr/local/logstash2.2.2/bin/GeoLiteCity.dat"
          }
        }
       # Set @timestamp from the syslog prefix captured by grok. The original
       # config matched a field named "log_timestamp" that no filter creates,
       # with a date format that never occurs in syslog lines, so events kept
       # their processing time instead of the log time. Syslog pads a
       # single-digit day with a space, hence the two patterns.
       date{
          match => ["syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss"]
          timezone =>"Etc/UTC"
       }
       mutate{
                # Drop the raw timestamp now folded into @timestamp, plus
                # fields that are redundant once parsing succeeded.
                remove_field => ["syslog_timestamp"]
                remove_field => [ "host" ]
                remove_field => [ "path" ]
                remove_field => [ "pid" ]
                remove_field => [ "client_port" ]
                remove_field => [ "program" ]
                remove_field => [ "haproxy_monthday" ]
                remove_field => [ "haproxy_month" ]
                remove_field => [ "haproxy_year" ]
                remove_field => [ "haproxy_hour" ]
                remove_field => [ "haproxy_minute" ]
                remove_field => [ "haproxy_second" ]
                remove_field => [ "haproxy_milliseconds" ]
                remove_field => [ "frontend_name" ]
                remove_field => [ "captured_response_cookie" ]
                remove_field => [ "captured_request_cookie" ]
          # Cast numeric fields so Elasticsearch can aggregate on them.
          # (The original also converted a "timetaken" field that no filter
          # ever produces; that dead entry has been removed.)
          convert => [ "http_status_code","integer" ]
          convert => [ "bytes_read","integer" ]
          convert => [ "time_duration","integer" ]
          convert => [ "time_backend_response","integer" ]
          convert => [ "actconn","integer" ]
          convert => [ "feconn","integer" ]
          convert => [ "beconn","integer" ]
          convert => [ "srvconn","integer" ]
          convert => [ "retries","integer" ]
          convert => [ "srv_queue","integer" ]
          convert => [ "backend_queue","integer" ]
          convert => [ "time_request","integer" ]
          convert => [ "time_queue","integer" ]
          convert => [ "time_backend_connect","integer" ]

      }
}
output {
        # Uncomment to dump each event to the console while debugging:
        #stdout { codec => rubydebug}
        # Ship the parsed events to Elasticsearch, one index per day.
        elasticsearch {
                index => "logstash-%{+YYYY.MM.dd}"
                hosts => ["192.168.1.20:9200"]
        }
}


本文出自 “枫林晚” 博客,请务必保留此出处http://fengwan.blog.51cto.com/508652/1755489

以上是关于ELKStack实时分析Haproxy访问日志配置的主要内容,如果未能解决你的问题,请参考以下文章

ELKstack-企业级日志收集系统

企业级日志收集系统——ELKstack

企业级日志收集系统——ELKstack

ELKStack 实战之 Elasticsearch [一]

ELkStack集群核心概念 #yyds干货盘点#

ELKStack生产案例