Elk实时日志分析平台5.0版本源码安装配置
Posted
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了Elk实时日志分析平台5.0版本源码安装配置相关的知识,希望对你有一定的参考价值。
目录
一、 安装JAVA
# mkdir /usr/local/java/ -p
# cd /usr/local/java/
# tar zxvf /data/elk5.0/jdk-8u111-linux-x64.tar.gz
# cat >>/etc/profile<<'EOF'
export JAVA_HOME=/usr/local/java/jdk1.8.0_111
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
EOF
# source /etc/profile
# java -version
java version "1.8.0_111"
Java(TM) SE Runtime Environment (build 1.8.0_111-b14)
Java HotSpot(TM) 64-Bit Server VM (build 25.111-b14, mixed mode)
二、 安装elasticsearch
# mkdir /data/PRG/ -p
# cd /data/PRG/
# tar zxvf /data/elk5.0/elasticsearch-5.0.2.tar.gz
# mv elasticsearch-5.0.2 elasticsearch
# useradd elasticsearch -s /sbin/nologin
# chown elasticsearch.elasticsearch /data/PRG/elasticsearch/
添加启动脚本
vi /etc/init.d/elasticsearch
#!/bin/sh
#
# elasticsearch <summary>
#
# chkconfig:   2345 80 20
# description: Starts and stops a single elasticsearch instance on this system
#
### BEGIN INIT INFO
# Provides: Elasticsearch
# Required-Start: $network $named
# Required-Stop: $network $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: This service manages the elasticsearch daemon
# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search.
### END INIT INFO

#
# init.d / servicectl compatibility (openSUSE)
#
if [ -f /etc/rc.status ]; then
    . /etc/rc.status
    rc_reset
fi

#
# Source function library.
#
if [ -f /etc/rc.d/init.d/functions ]; then
    . /etc/rc.d/init.d/functions
fi

# Default values for the elasticsearch variables used in this script.
ES_USER="elasticsearch"
ES_GROUP="elasticsearch"
ES_HOME="/data/PRG/elasticsearch"
MAX_OPEN_FILES=65536
MAX_MAP_COUNT=262144
LOG_DIR="/var/log/elasticsearch"
DATA_DIR="/var/lib/elasticsearch"
CONF_DIR="/data/PRG/elasticsearch/config"
PID_DIR="/var/run/elasticsearch"

# Source the default env file (may override the defaults above).
ES_ENV_FILE="/etc/sysconfig/elasticsearch"
if [ -f "$ES_ENV_FILE" ]; then
    . "$ES_ENV_FILE"
fi

# CONF_FILE setting was removed in 5.x; fail fast if a stale env file sets it.
if [ ! -z "$CONF_FILE" ]; then
    echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
    exit 1
fi

exec="$ES_HOME/bin/elasticsearch"
prog="elasticsearch"
pidfile="$PID_DIR/${prog}.pid"

export ES_HEAP_SIZE
export ES_HEAP_NEWSIZE
export ES_DIRECT_SIZE
export ES_JAVA_OPTS
export ES_GC_LOG_FILE
export ES_STARTUP_SLEEP_TIME
export JAVA_HOME
export ES_INCLUDE

lockfile=/var/lock/subsys/$prog

# Backwards compatibility for old config sysconfig files, pre 0.90.1.
# NOTE(review): the original used unquoted [ -n $USER ] / [ -z $ES_USER ],
# which evaluates to true whenever the variable is empty; quoted here.
if [ -n "$USER" ] && [ -z "$ES_USER" ]; then
    ES_USER=$USER
fi

# Locate an executable java, preferring $JAVA_HOME/bin/java over $PATH.
checkJava() {
    if [ -x "$JAVA_HOME/bin/java" ]; then
        JAVA="$JAVA_HOME/bin/java"
    else
        JAVA=`which java`
    fi
    if [ ! -x "$JAVA" ]; then
        echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
        exit 1
    fi
}

start() {
    checkJava
    [ -x "$exec" ] || exit 5
    if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then
        echo "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"
        return 7
    fi
    if [ -n "$MAX_OPEN_FILES" ]; then
        ulimit -n $MAX_OPEN_FILES
    fi
    if [ -n "$MAX_LOCKED_MEMORY" ]; then
        ulimit -l $MAX_LOCKED_MEMORY
    fi
    if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
        sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
    fi
    export ES_GC_LOG_FILE
    # Ensure that the PID_DIR exists (it is cleaned at OS startup time).
    if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
        mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
    fi
    if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then
        touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"
    fi
    cd "$ES_HOME"
    echo -n $"Starting $prog: "
    # If not running, start it up here; -d daemonizes, -p writes the pidfile.
    daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d
    #daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.conf=$CONF_DIR
    retval=$?
    echo
    [ $retval -eq 0 ] && touch $lockfile
    return $retval
}

stop() {
    echo -n $"Stopping $prog: "
    # Allow up to 86400s (one day) for an orderly shutdown before killproc gives up.
    killproc -p $pidfile -d 86400 $prog
    retval=$?
    echo
    [ $retval -eq 0 ] && rm -f $lockfile
    return $retval
}

restart() {
    stop
    start
}

reload() {
    restart
}

force_reload() {
    restart
}

rh_status() {
    # Run checks to determine if the service is running, via generic status.
    status -p $pidfile $prog
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

case "$1" in
    start)
        rh_status_q && exit 0
        $1
        ;;
    stop)
        rh_status_q || exit 0
        $1
        ;;
    restart)
        $1
        ;;
    reload)
        rh_status_q || exit 7
        $1
        ;;
    force-reload)
        force_reload
        ;;
    status)
        rh_status
        ;;
    condrestart|try-restart)
        rh_status_q || exit 0
        restart
        ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
        exit 2
esac
exit $?
# chmod +x /etc/init.d/elasticsearch # /etc/init.d/elasticsearch start # /etc/init.d/elasticsearch status elasticsearch (pid 20895) is running... # netstat -ntlp |grep 9[2-3]00tcp 0 0 :::9300 :::* LISTEN 20895/java tcp 0 0 :::9200 :::* LISTEN 20895/java
三、 配置elasticsearch
内存低于2G,需要修改jvm配置
-Xms512m
-Xmx512m
# cat /data/PRG/elasticsearch/config/elasticsearch.yml |grep -v '#'
network.host: 0.0.0.0    ### 开启监听地址
action.auto_create_index: .security,.monitoring*,.watches,.triggered_watches,.watcher-history*
#### 以下模块视情况是否开启
xpack.security.enabled: true    #### 开启用户认证
xpack.monitoring.enabled: true
xpack.graph.enabled: true
xpack.watcher.enabled: true
xpack.security.authc.realms:    #### 用户认证模式,ldap、file、pki、Active Directory等
  file1:
    type: file
    order: 0
四、 安装logstash
# cd /data/PRG/
# tar zxvf /data/elk5.0/logstash-5.0.2.tar.gz
# mv logstash-5.0.2 logstash
# useradd logstash -s /sbin/nologin
# chown logstash.logstash /data/PRG/logstash
添加启动脚本
vim /etc/init.d/logstash
#!/bin/sh
# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
#   * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides:          logstash
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description:
# Description:       Starts Logstash as a daemon.
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

if [ `id -u` -ne 0 ]; then
    echo "You need root privileges to run this script"
    exit 1
fi

name=logstash
pidfile="/var/run/$name.pid"

LS_USER=logstash
LS_GROUP=logstash
LS_HOME=/var/lib/logstash
LS_HEAP_SIZE="1g"
LS_LOG_DIR=/var/log/logstash
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"
# NOTE(review): the stock script used /etc/logstash/conf.d and /opt/logstash;
# adjusted below to the /data/PRG/logstash layout used by this install
# (sections 四/五) — confirm against your actual layout.
LS_CONF_DIR=/data/PRG/logstash/conf.d
LS_OPEN_FILES=16384
LS_NICE=19
# Default is 0 (do not SIGKILL on stop timeout); can be overridden by the
# env files sourced below.
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0}
LS_OPTS=""

[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

program=/data/PRG/logstash/bin/logstash
# NOTE(review): the `agent` subcommand was removed in Logstash 5.x; kept from
# the original document — drop the word `agent` if startup fails.
args="agent -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"

# Run a command, discarding all output; propagate its exit status.
quiet() {
    "$@" > /dev/null 2>&1
    return $?
}

start() {
    LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
    HOME=${LS_HOME}
    export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING LS_GC_LOG_FILE

    # chroot --userspec doesn't pick up supplemental groups when setting
    # user:group, so collect them ourselves. (We are root here.)
    SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')
    if [ ! -z "$SGROUPS" ]; then
        EXTRA_GROUPS="--groups $SGROUPS"
    fi

    # Set ulimit as root first, before we drop privileges.
    ulimit -n ${LS_OPEN_FILES}

    # Run the program!
    nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "
        cd $LS_HOME
        ulimit -n ${LS_OPEN_FILES}
        exec \"$program\" $args
    " > "${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &

    # Generate the pidfile from here. If we instead made the forked process
    # generate it, there would be a race condition between the pidfile writing
    # and a process possibly asking for status.
    echo $! > $pidfile
    echo "$name started."
    return 0
}

stop() {
    # Try a few times to kill TERM the program.
    if status; then
        pid=`cat "$pidfile"`
        echo "Killing $name (pid $pid) with SIGTERM"
        kill -TERM $pid
        # Wait for it to exit.
        for i in 1 2 3 4 5 6 7 8 9; do
            echo "Waiting $name (pid $pid) to die..."
            status || break
            sleep 1
        done
        if status; then
            if [ $KILL_ON_STOP_TIMEOUT -eq 1 ]; then
                echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
                kill -KILL $pid
                echo "$name killed with SIGKILL."
            else
                echo "$name stop failed; still running."
                return 1 # stop timed out and not forced
            fi
        else
            echo "$name stopped."
        fi
    fi
}

status() {
    if [ -f "$pidfile" ]; then
        pid=`cat "$pidfile"`
        if kill -0 $pid > /dev/null 2> /dev/null; then
            # A process with this pid is running. It may not be our pid,
            # but that's what you get with just pidfiles.
            return 0
        else
            return 2 # program is dead but pid file exists
        fi
    else
        return 3 # program is not running
    fi
}

reload() {
    if status; then
        kill -HUP `cat "$pidfile"`
    fi
}

force_stop() {
    if status; then
        stop
        status && kill -KILL `cat "$pidfile"`
    fi
}

configtest() {
    # Check if any config file exists at all before invoking logstash.
    if [ ! "$(ls -A ${LS_CONF_DIR}/* 2> /dev/null)" ]; then
        echo "There aren't any configuration files in ${LS_CONF_DIR}"
        return 1
    fi
    HOME=${LS_HOME}
    export PATH HOME
    test_args="--configtest -f ${LS_CONF_DIR} ${LS_OPTS}"
    $program ${test_args}
    [ $? -eq 0 ] && return 0
    # Program not configured
    return 6
}

case "$1" in
    start)
        status
        code=$?
        if [ $code -eq 0 ]; then
            echo "$name is already running"
        else
            start
            code=$?
        fi
        exit $code
        ;;
    stop)
        stop
        ;;
    force-stop)
        force_stop
        ;;
    status)
        status
        code=$?
        if [ $code -eq 0 ]; then
            echo "$name is running"
        else
            echo "$name is not running"
        fi
        exit $code
        ;;
    reload)
        reload
        ;;
    restart)
        quiet configtest
        RET=$?
        if [ ${RET} -ne 0 ]; then
            echo "Configuration error. Not restarting. Re-run with configtest parameter for details"
            exit ${RET}
        fi
        stop && start
        ;;
    configtest)
        configtest
        exit $?
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|force-stop|status|reload|restart|configtest}" >&2
        exit 3
        ;;
esac
exit $?
# chmod +x /etc/init.d/logstash # /etc/init.d/logstash start # /etc/init.d/logstash status logstash is running # netstat -ntlp|grep 9600tcp 0 0 :::9600 :::* LISTEN 10141/java
五、 配置 logstash
# cat /data/PRG/logstash/config/logstash.yml |grep -v '#'
http.host: "0.0.0.0"    ### 开启监听地址
nginx日志收集
# cat /data/PRG/logstash/conf.d/filter.conf
input {
  beats {
    port => 10200
  }
}
filter {
  grok {
    match => { message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\] , %{IPORHOST:http_host} , \"%{WORD:http_verb} (?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code} , %{NUMBER:bytes_read} , %{QS:referrer} , %{QS:agent} , \"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , - , - , - , %{IPORHOST:server_ip} , %{BASE10NUM:request_duration}" }
    match => { message => "%{IPORHOST:remote_addr} , \[%{HTTPDATE:timestamp}\] , %{IPORHOST:http_host} , \"%{WORD:http_verb} (?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code} , %{NUMBER:bytes_read} , %{QUOTEDSTRING:referrer} , %{QS:agent} , \"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , %{IPORHOST}:%{INT} , %{INT} , %{BASE10NUM} , %{IPORHOST} , %{BASE10NUM:request_duration}" }
  }
}
output {
  elasticsearch {
    hosts => ["192.168.62.200:9200"]
    index => "operation-%{+YYYY.MM.dd}"
    document_type => "nginx2"
    user => 'admin'          #### elasticsearch的用户名,用X-PACK插件创建
    password => 'kbsonlong'  #### elasticsearch的密码
  }
  stdout { codec => rubydebug }
}
六、 安装kibana
# cd /data/PRG/
# tar zxvf /data/elk5.0/kibana-5.0.2-linux-x86_64.tar.gz
# mv kibana-5.0.2-linux-x86_64 kibana
# useradd kibana -s /sbin/nologin
# chown kibana.kibana /data/PRG/kibana
添加启动脚本
# vim /etc/init.d/kibana
#!/bin/sh
# Init script for kibana
# Maintained by
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
#   * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides:          kibana
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description:
# Description:       Kibana
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

KIBANA_HOME=/data/PRG/kibana
name=kibana
program=$KIBANA_HOME/bin/kibana
args=''
pidfile="$KIBANA_HOME/logs/$name.pid"
LOG_HOME="$KIBANA_HOME/logs"

[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name
[ -z "$nice" ] && nice=0
# Default is 0 (do not SIGKILL on stop timeout); the original left this unset,
# which makes the [ -eq 1 ] test in stop() fail with a syntax error.
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0}

# Log a message to syslog, tagged with this script's path.
trace() {
    logger -t "/etc/init.d/kibana" "$@"
}

# Log a message to syslog and echo it to the console.
emit() {
    trace "$@"
    echo "$@"
}

start() {
    # Ensure the log directory is set up correctly.
    [ ! -d "$LOG_HOME" ] && mkdir "$LOG_HOME"
    chmod 755 "$LOG_HOME"

    # Setup any environmental stuff beforehand.

    # Run the program! (The pleaserun chroot/userspec variant stays disabled;
    # kibana runs as the invoking user.)
    #chroot --userspec "$user":"$group" "$chroot" sh -c "$program $args" &
    $program $args >> "$LOG_HOME/kibana.stdout" 2>> "$LOG_HOME/kibana.stderr" &

    # Generate the pidfile from here. If we instead made the forked process
    # generate it, there would be a race condition between the pidfile writing
    # and a process possibly asking for status.
    echo $! > $pidfile
    emit "$name started"
    return 0
}

stop() {
    # Try a few times to kill the program.
    if status; then
        pid=$(cat "$pidfile")
        echo "Killing $name (pid $pid) with SIGTERM"
        # NOTE(review): despite the SIGTERM message, the original script
        # SIGKILLs the pid and its children (the node child process);
        # behavior preserved here.
        ps -ef | grep $pid | grep -v 'grep' | awk '{print $2}' | xargs kill -9
        # Wait for it to exit.
        for i in 1 2 3 4 5; do
            trace "Waiting $name (pid $pid) to die..."
            status || break
            sleep 1
        done
        if status; then
            if [ "$KILL_ON_STOP_TIMEOUT" -eq 1 ]; then
                trace "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
                kill -KILL $pid
                emit "$name killed with SIGKILL."
            else
                emit "$name stop failed; still running."
            fi
        else
            emit "$name stopped."
        fi
    fi
}

status() {
    if [ -f "$pidfile" ]; then
        pid=$(cat "$pidfile")
        if ps -p $pid > /dev/null 2> /dev/null; then
            # A process with this pid is running. It may not be our pid,
            # but that's what you get with just pidfiles.
            return 0
        else
            return 2 # program is dead but pid file exists
        fi
    else
        return 3 # program is not running
    fi
}

# Log every recognised action before dispatching it.
case "$1" in
    force-start|start|stop|status|restart)
        trace "Attempting '$1' on kibana"
        ;;
esac

case "$1" in
    force-start)
        PRESTART=no
        exec "$0" start
        ;;
    start)
        status
        code=$?
        if [ $code -eq 0 ]; then
            emit "$name is already running"
            exit $code
        else
            start
            exit $?
        fi
        ;;
    stop)
        stop
        ;;
    status)
        status
        code=$?
        if [ $code -eq 0 ]; then
            emit "$name is running"
        else
            emit "$name is not running"
        fi
        exit $code
        ;;
    restart)
        stop && start
        ;;
    *)
        # Original usage string listed force-start twice and a nonexistent
        # force-stop; corrected to the actions actually implemented.
        echo "Usage: $SCRIPTNAME {start|force-start|stop|status|restart}" >&2
        exit 3
        ;;
esac
exit $?
# chmod +x /etc/init.d/kibana # /etc/init.d/kibana start # /etc/init.d/kibana status # netstat -ntlp |grep 5601tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 13052/node
七、 配置kibana
# cat /data/PRG/kibana/config/kibana.yml |grep -v '#'
server.host: "0.0.0.0"
#### 以下模块视情况是否开启
xpack.security.enabled: true
xpack.monitoring.enabled: true
xpack.graph.enabled: true
xpack.reporting.enabled: true
八、 安装x-pack插件
# /data/PRG/kibana/bin/kibana-plugin install file:///root/x-pack-5.0.0.zip
# /data/PRG/elasticsearch/bin/elasticsearch-plugin install file:///root/x-pack-5.0.0.zip
离线安装x-pack要修改用户脚本,默认创建用户配置文件在/etc/elasticsearch/x-pack目录
在创建用户的时候提示/etc/elasticsearch/x-pack/users…tmp不存在,直接创建目录或者修改/data/PRG/elasticsearch/bin/x-pack/users脚本
# mkdir /etc/elasticsearch/x-pack/
# chown elasticsearch.elasticsearch /etc/elasticsearch/x-pack/ -R
九、 x-pack管理用户
1、 添加用户
# cd /data/PRG/elasticsearch # bin/x-pack/users useradd admin -p kbsonlong -r superuser
2、 查看用户
# /data/PRG/elasticsearch/bin/x-pack/users list admin : superuser
test : - ###创建用户时没有添加-r参数,所以没有用户角色
3、 测试用户登录
# curl http://localhost:9200/_xpack/ --user admin:kbsonlong{"build":{"hash":"7763f8e","date":"2016-10-26T04:51:59.202Z"},"license":{"uid":"06a82587-66ac-4d4a-90c4-857d9ca7f3bc","type":"trial","mode":"trial","status":"active","expiry_date_in_millis":1483753731066},"features":{"graph":{"description":"Graph Data Exploration for the Elastic Stack","available":true,"enabled":true},"monitoring":{"description":"Monitoring for the Elastic Stack","available":true,"enabled":true},"security":{"description":"Security for the Elastic Stack","available":true,"enabled":true},"watcher":{"description":"Alerting, Notification and Automation for the Elastic Stack","available":true,"enabled":true}},"tagline":"You know, for X"}
4、 删除用户
# /data/PRG/elasticsearch/bin/x-pack/users userdel test # /data/PRG/elasticsearch/bin/x-pack/users list admin : superuser
十、 安装filebeat
# cd /data/PRG
# tar zxvf /data/elk5.0/filebeat-5.0.0-linux-x86_64.tar.gz
# mv filebeat-5.0.0-linux-x86_64 filebeat
配置启动脚本
# vim /etc/init.d/filebeat
#!/bin/bash
#
# filebeat      filebeat shipper
#
# chkconfig: 2345 98 02
#
### BEGIN INIT INFO
# Provides:          filebeat
# Required-Start:    $local_fs $network $syslog
# Required-Stop:     $local_fs $network $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Sends log files to Logstash or directly to Elasticsearch.
# Description:       filebeat is a shipper part of the Elastic Beats
#                    family. Please see: https://www.elastic.co/products/beats
### END INIT INFO

PATH=/usr/bin:/sbin:/bin:/usr/sbin
export PATH

[ -f /etc/sysconfig/filebeat ] && . /etc/sysconfig/filebeat
pidfile=${PIDFILE-/data/PRG/filebeat/filebeat.pid}
agent=${PB_AGENT-/data/PRG/filebeat/filebeat}
args="-c /data/PRG/filebeat/filebeat.yml"
test_args="-e -configtest"
wrapper="filebeat-god"
wrapperopts="-r / -n -p $pidfile"
RETVAL=0

# Source function library.
. /etc/rc.d/init.d/functions

# Determine if we can use the -p option to daemon, killproc, and status.
# RHEL < 5 can't.
if status | grep -q -- '-p' 2>/dev/null; then
    daemonopts="--pidfile $pidfile"
    pidopts="-p $pidfile"
fi

# Validate the filebeat configuration before (re)starting.
# NOTE(review): the original named this function `test`, shadowing the shell
# builtin of the same name; renamed for safety (internal only).
checkconfig() {
    $agent $args $test_args
}

start() {
    echo -n $"Starting filebeat: "
    checkconfig
    if [ $? -ne 0 ]; then
        echo
        exit 1
    fi
    daemon $daemonopts $wrapper $wrapperopts -- $agent $args
    RETVAL=$?
    echo
    return $RETVAL
}

stop() {
    echo -n $"Stopping filebeat: "
    killproc $pidopts $wrapper
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${pidfile}
}

restart() {
    # Refuse to restart with a broken config; keeps the old process running.
    checkconfig
    if [ $? -ne 0 ]; then
        return 1
    fi
    stop
    start
}

rh_status() {
    status $pidopts $wrapper
    RETVAL=$?
    return $RETVAL
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    condrestart|try-restart)
        rh_status_q || exit 0
        restart
        ;;
    status)
        rh_status
        ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|condrestart}"
        exit 1
esac
exit $RETVAL
# cat filebeat/filebeat.yml |grep -v '#'
filebeat.prospectors:
- input_type: log
  paths:
    - /tmp/nginx.log
output.logstash:
  enabled: true
  hosts: ["localhost:10200"]
启动filebeat
# /etc/init.d/filebeat5 start Starting filebeat: 2016/12/08 07:18:37.177631 beat.go:264: INFO Home path: [/data/PRG/filebeat] Config path: [/data/PRG/filebeat] Data path: [/data/PRG/filebeat/data] Logs path: [/data/PRG/filebeat/logs]2016/12/08 07:18:37.177681 beat.go:174: INFO Setup Beat: filebeat; Version: 5.0.02016/12/08 07:18:37.177760 logstash.go:90: INFO Max Retries set to: 32016/12/08 07:18:37.177828 outputs.go:106: INFO Activated logstash as output plugin.2016/12/08 07:18:37.177912 publish.go:291: INFO Publisher name: operation2016/12/08 07:18:37.178158 async.go:63: INFO Flush Interval set to: 1s2016/12/08 07:18:37.178170 async.go:64: INFO Max Bulk Size set to: 2048Config OK [ OK ] # /etc/init.d/filebeat5 status filebeat-god (pid 7365) is running... # ps -ef |grep filebeat root 7405 1 0 15:18 pts/1 00:00:00 filebeat-god -r / -n -p /data/PRG/filebeat/filebeat.pid -- /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml root 7406 7405 0 15:18 pts/1 00:00:00 /data/PRG/filebeat/filebeat -c /data/PRG/filebeat/filebeat.yml # netstat -ntlp | egrep ‘9200|9300|5601|9600|10200‘tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 14339/node tcp 0 0 :::9300 :::* LISTEN 14205/java tcp 0 0 :::10200 :::* LISTEN 14309/java tcp 0 0 ::ffff:127.0.0.1:9600 :::* LISTEN 14309/java tcp 0 0 :::9200 :::* LISTEN 14205/java
本文出自 “炎风狼” 博客,请务必保留此出处http://kbson.blog.51cto.com/5359697/1881160
以上是关于Elk实时日志分析平台5.0版本源码安装配置的主要内容,如果未能解决你的问题,请参考以下文章
ELK(ElasticSearch, Logstash, Kibana)搭建实时日志分析平台
ELK(ElasticSearch, Logstash, Kibana)搭建实时日志分析平台
ELK搭建实时日志分析平台之二Logstash和Kibana搭建
[Big Data - ELK] ELK(ElasticSearch, Logstash, Kibana)搭建实时日志分析平台