One-Click ELK Log Platform Deployment Script for Linux

Posted by 小柒博客


This is an automated script that deploys an ELK logging platform on Linux in one shot; it may be a useful reference. The script is as follows:

Environment

OS: CentOS Linux release 7.8.2003

Software versions

elasticsearch: elasticsearch-7.5.1-linux-x86_64.tar.gz

kibana: kibana-7.5.1-linux-x86_64.tar.gz

logstash: logstash-7.5.1.tar.gz

filebeat: filebeat-7.5.1-linux-x86_64.tar.gz

JDK: jdk-11.0.1_linux-x64_bin.tar.gz

Nginx: nginx-1.18.0.tar.gz

Zookeeper: zookeeper-3.4.10.tar.gz

Kafka: kafka_2.12-2.5.0.tgz

Script features

1) One-click install of Elasticsearch, Kibana, Logstash, and Filebeat

2) One-click install of Zookeeper

3) One-click install of Kafka

4) One-click install of Nginx

5) Automatically creates the nginx_access and nginx_error indices

6) Automatically sets the Elasticsearch user passwords

[root@localhost ~]# vim install_elk_filebeat_kafka.sh

#!/bin/bash
#Date:2019-5-20 13:14:00
#Author Blog:
# https://www.yangxingzhen.com
# https://www.i7ti.cn
#Author WeChat:
#Author mirrors site:
# https://mirrors.yangxingzhen.com
#About the Author
# BY:YangXingZhen
# Mail:xingzhen.yang@yangxingzhen.com
# QQ:675583110

User="elk"
Elasticsearch_User="elastic"
Elasticsearch_Passwd="www.yangxingzhen.com"
IPADDR=$(hostname -I | awk '{print $1}')
Elasticsearch_DIR="/data/elasticsearch"
Kafka_IP=$(hostname -I | awk '{print $1}')
Zookeeper_IP=$(hostname -I | awk '{print $1}')
Elasticsearch_IP=$(hostname -I | awk '{print $1}')

# Define JDK path variables
JDK_URL=https://mirrors.yangxingzhen.com/jdk
JDK_File=jdk-11.0.1_linux-x64_bin.tar.gz
JDK_File_Dir=jdk-11.0.1
JDK_Dir=/usr/local/jdk-11.0.1

# Define Zookeeper path variables
Zookeeper_URL=http://archive.apache.org/dist/zookeeper/zookeeper-3.4.10
Zookeeper_File=zookeeper-3.4.10.tar.gz
Zookeeper_File_Dir=zookeeper-3.4.10
Zookeeper_PREFIX=/usr/local/zookeeper

# Define Kafka path variables
Kafka_URL=https://archive.apache.org/dist/kafka/2.5.0
Kafka_File=kafka_2.12-2.5.0.tgz
Kafka_File_Dir=kafka_2.12-2.5.0
Kafka_Dir=/usr/local/kafka

# Define Nginx path variables
Nginx_URL=http://nginx.org/download
Nginx_File=nginx-1.18.0.tar.gz
Nginx_File_Dir=nginx-1.18.0
Nginx_Dir=/usr/local/nginx

# Define Elasticsearch path variables
Elasticsearch_URL=https://artifacts.elastic.co/downloads/elasticsearch
Elasticsearch_File=elasticsearch-7.5.1-linux-x86_64.tar.gz
Elasticsearch_File_Dir=elasticsearch-7.5.1
Elasticsearch_Dir=/usr/local/elasticsearch

# Define Filebeat path variables
Filebeat_URL=https://artifacts.elastic.co/downloads/beats/filebeat
Filebeat_File=filebeat-7.5.1-linux-x86_64.tar.gz
Filebeat_File_Dir=filebeat-7.5.1-linux-x86_64
Filebeat_Dir=/usr/local/filebeat

# Define Logstash path variables
Logstash_URL=https://artifacts.elastic.co/downloads/logstash
Logstash_File=logstash-7.5.1.tar.gz
Logstash_File_Dir=logstash-7.5.1
Logstash_Dir=/usr/local/logstash

# Define Kibana path variables
Kibana_URL=https://artifacts.elastic.co/downloads/kibana
Kibana_File=kibana-7.5.1-linux-x86_64.tar.gz
Kibana_File_Dir=kibana-7.5.1-linux-x86_64
Kibana_Dir=/usr/local/kibana

# Configure kernel parameters
cat >>/etc/security/limits.conf <<EOF
* soft nofile 65537
* hard nofile 65537
* soft nproc 65537
* hard nproc 65537
EOF

if [ $(grep -wc "4096" /etc/security/limits.d/20-nproc.conf) -eq 0 ];then
cat >>/etc/security/limits.d/20-nproc.conf <<EOF
* soft nproc 4096
EOF
fi

cat >/etc/sysctl.conf <<EOF
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_fin_timeout = 120
net.ipv4.tcp_keepalive_time = 120
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_max_tw_buckets = 30000
fs.file-max = 655350
vm.max_map_count = 262144
net.core.somaxconn = 65535
net.ipv4.ip_forward = 1
net.ipv6.conf.all.disable_ipv6 = 1
EOF

# Apply the sysctl settings
sysctl -p >/dev/null

# Create the elk user
[ $(grep -wc "elk" /etc/passwd) -eq 0 ] && useradd elk >/dev/null

# Install the JDK
java -version >/dev/null 2>&1
if [ $? -ne 0 ];then
# Install Package
[ -f /usr/bin/wget ] || yum -y install wget >/dev/null
wget -c ${JDK_URL}/${JDK_File}
tar xf ${JDK_File}
mv ${JDK_File_Dir} ${JDK_Dir}
cat >>/etc/profile <<EOF
export JAVA_HOME=${JDK_Dir}
export CLASSPATH=\$CLASSPATH:\$JAVA_HOME/lib:\$JAVA_HOME/jre/lib
export PATH=\$JAVA_HOME/bin:\$JAVA_HOME/jre/bin:\$PATH:\$HOME/bin
EOF
fi

# Load environment variables
source /etc/profile >/dev/null

# Install Zookeeper
if [ -d ${Zookeeper_PREFIX} ];then
echo -e "\033[31mThe Zookeeper Already Install...\033[0m"
exit 1
else
wget -c ${Zookeeper_URL}/${Zookeeper_File}
tar xf ${Zookeeper_File}
mv ${Zookeeper_File_Dir} ${Zookeeper_PREFIX}
chown -R root.root ${Zookeeper_PREFIX}
mkdir -p ${Zookeeper_PREFIX}/{data,logs}
cp ${Zookeeper_PREFIX}/conf/zoo_sample.cfg ${Zookeeper_PREFIX}/conf/zoo.cfg
cat >${Zookeeper_PREFIX}/conf/zoo.cfg <<EOF
# Heartbeat interval between servers, or between a client and a server, in milliseconds
tickTime=2000
# Maximum number of heartbeats (tickTime intervals) a follower (F) may take for its initial connection to the leader (L)
initLimit=10
# Maximum number of heartbeats tolerated between a follower's (F) request and the leader's (L) answer
syncLimit=5
# Port Zookeeper listens on for client connections
clientPort=2181
# Data directory
dataDir=${Zookeeper_PREFIX}/data
# Log directory
dataLogDir=${Zookeeper_PREFIX}/logs
# Zookeeper cluster: 2888 is the leader-follower data sync port, 3888 is the leader election port
# server.<id>=<server IP>:<sync port>:<election port>
server.1=${IPADDR}:2888:3888
EOF

# Write the server ID
echo "1" > ${Zookeeper_PREFIX}/data/myid

# Start Zookeeper
source /etc/profile >/dev/null && ${Zookeeper_PREFIX}/bin/zkServer.sh start
fi

# Install Kafka
if [ ! -d ${Kafka_Dir} ];then
wget -c ${Kafka_URL}/${Kafka_File}
tar xf ${Kafka_File}
mv ${Kafka_File_Dir} ${Kafka_Dir}
# Write the configuration file
cat >${Kafka_Dir}/config/server.properties <<EOF
listeners=PLAINTEXT://${IPADDR}:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=10
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=${IPADDR}:2181
zookeeper.connection.timeout.ms=60000
group.initial.rebalance.delay.ms=0
EOF

# Wait until Zookeeper is up before going any further
# (${Code} expands to "break" once port 2181 is listening, which ends the loop)
Code=""
while sleep 10
do
echo -e "\033[32m$(date +'%F %T') Waiting for the Zookeeper service to start...\033[0m"
# Check the Zookeeper service port
[ -f /usr/bin/netstat ] || yum -y install net-tools >/dev/null
netstat -lntup |grep "2181" >/dev/null
if [ $? -eq 0 ];then
Code="break"
fi
${Code}
done

# Start the Kafka service
source /etc/profile >/dev/null && ${Kafka_Dir}/bin/kafka-server-start.sh -daemon ${Kafka_Dir}/config/server.properties

# Wait until Kafka is up before going any further
Code=""
while sleep 10
do
echo -e "\033[32m$(date +'%F %T') Waiting for the Kafka service to start...\033[0m"
# Check the Kafka service port
netstat -lntup |grep "9092" >/dev/null
if [ $? -eq 0 ];then
Code="break"
fi
${Code}
done

else
echo -e "\033[31mThe Kafka Already Install...\033[0m"
exit 1
fi

# Install Elasticsearch
if [ ! -d ${Elasticsearch_Dir} ];then
# Install Package
[ -f /usr/bin/wget ] || yum -y install wget >/dev/null
wget -c ${Elasticsearch_URL}/${Elasticsearch_File}
tar xf ${Elasticsearch_File}
mv ${Elasticsearch_File_Dir} ${Elasticsearch_Dir}
else
echo -e "\033[31mThe Elasticsearch Already Install...\033[0m"
exit 1
fi

# Install Kibana
if [ ! -d ${Kibana_Dir} ];then
# Install Package
[ -f /usr/bin/wget ] || yum -y install wget >/dev/null
wget -c ${Kibana_URL}/${Kibana_File}
tar xf ${Kibana_File}
mv ${Kibana_File_Dir} ${Kibana_Dir}
else
echo -e "\033[31mThe Kibana Already Install...\033[0m"
exit 1
fi

# Configure Elasticsearch
mkdir -p ${Elasticsearch_DIR}/{data,logs}
cat >${Elasticsearch_Dir}/config/elasticsearch.yml <<EOF
# Node name
node.name: es-master
# Data directory (must exist beforehand)
path.data: ${Elasticsearch_DIR}/data
# Log directory (must exist beforehand)
path.logs: ${Elasticsearch_DIR}/logs
# Node IP
network.host: ${Elasticsearch_IP}
# TCP transport port
transport.tcp.port: 9300
# HTTP port
http.port: 9200
# List of master-eligible nodes; with multiple masters, configure each node accordingly
cluster.initial_master_nodes: ["${Elasticsearch_IP}:9300"]
# Whether this node may act as master
node.master: true
# Whether this node stores data
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
# CORS
http.cors.enabled: true
http.cors.allow-origin: "*"
# X-Pack security
http.cors.allow-headers: Authorization
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
EOF

# Configure Kibana
cat >${Kibana_Dir}/config/kibana.yml <<EOF
server.port: 5601
server.host: "${Elasticsearch_IP}"
elasticsearch.hosts: ["http://${Elasticsearch_IP}:9200"]
elasticsearch.username: "${Elasticsearch_User}"
elasticsearch.password: "${Elasticsearch_Passwd}"
logging.dest: ${Kibana_Dir}/logs/kibana.log
i18n.locale: "zh-CN"
EOF

# Create the Kibana log directory
[ -d ${Kibana_Dir}/logs ] || mkdir ${Kibana_Dir}/logs

# Grant the elk user ownership of Elasticsearch; Kibana runs as root
chown -R ${User}.${User} ${Elasticsearch_Dir}
chown -R ${User}.${User} ${Elasticsearch_DIR}
chown -R root.root ${Kibana_Dir}

# Start Elasticsearch (manual alternative, kept commented out; the systemd unit below is used instead)
#su ${User} -c "source /etc/profile >/dev/null && ${Elasticsearch_Dir}/bin/elasticsearch -d"

# Create the systemd unit file
cat >/usr/lib/systemd/system/elasticsearch.service <<EOF
[Unit]
Description=elasticsearch
After=network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target

[Service]
LimitCORE=infinity
LimitNOFILE=655360
LimitNPROC=655360
User=${User}
Group=${User}
PIDFile=${Elasticsearch_Dir}/logs/elasticsearch.pid
ExecStart=${Elasticsearch_Dir}/bin/elasticsearch
ExecReload=/bin/kill -s HUP \$MAINPID
ExecStop=/bin/kill -s TERM \$MAINPID
RestartSec=30
Restart=always
PrivateTmp=true

[Install]
WantedBy=multi-user.target
EOF

# Start the Elasticsearch service
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch

# Wait until Elasticsearch is up before going any further
Code=""
while sleep 10
do
echo -e "\033[32m$(date +'%F %T') Waiting for the Elasticsearch service to start...\033[0m"
# Check the Elasticsearch service ports
netstat -lntup |egrep "9200|9300" >/dev/null
if [ $? -eq 0 ];then
Code="break"
fi
${Code}
done

# Set passwords for all the Elasticsearch built-in users (elastic, apm_system,
# kibana, logstash_system, beats_system, remote_monitoring_user); one pattern
# pair suffices because exp_continue rematches it for every prompt
cat >/tmp/config_elasticsearch_passwd.exp <<EOF
spawn su ${User} -c "source /etc/profile >/dev/null && ${Elasticsearch_Dir}/bin/elasticsearch-setup-passwords interactive"
set timeout 60
expect {
-timeout 20
"y/N" {
send "y\r"
exp_continue
}
"Enter password *:" {
send "${Elasticsearch_Passwd}\r"
exp_continue
}
"Reenter password *:" {
send "${Elasticsearch_Passwd}\r"
exp_continue
}
}
EOF

[ -f /usr/bin/expect ] || yum -y install expect >/dev/null
expect /tmp/config_elasticsearch_passwd.exp

# Create the systemd unit file
cat >/usr/lib/systemd/system/kibana.service <<EOF
[Unit]
Description=kibana
After=network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target

[Service]
PIDFile=/var/run/kibana.pid
ExecStart=/usr/local/kibana/bin/kibana --allow-root
ExecReload=/bin/kill -s HUP \$MAINPID
ExecStop=/bin/kill -s TERM \$MAINPID
PrivateTmp=false

[Install]
WantedBy=multi-user.target
EOF

# Start Kibana
systemctl daemon-reload
systemctl enable kibana
systemctl start kibana

# Wait until Kibana is up before going any further
Code=""
while sleep 10
do
echo -e "\033[32m$(date +'%F %T') Waiting for the Kibana service to start...\033[0m"
# Probe the Kibana login page
CODE=$(curl -s -w "%{http_code}" -o /dev/null http://${IPADDR}:5601/login)
if [ ${CODE} -eq 200 ];then
Code="break"
fi
${Code}
done

# Install Filebeat
if [ ! -d ${Filebeat_Dir} ];then
wget -c ${Filebeat_URL}/${Filebeat_File}
tar xf ${Filebeat_File}
mv ${Filebeat_File_Dir} ${Filebeat_Dir}
else
echo -e "\033[31mThe Filebeat Already Install...\033[0m"
exit 1
fi

# Install Logstash
if [ ! -d ${Logstash_Dir} ];then
wget -c ${Logstash_URL}/${Logstash_File}
tar xf ${Logstash_File}
mv ${Logstash_File_Dir} ${Logstash_Dir}
else
echo -e "\033[31mThe Logstash Already Install...\033[0m"
exit 1
fi

# Install Nginx
if [ ! -d ${Nginx_Dir} ];then
# Install Package
yum -y install pcre pcre-devel openssl openssl-devel gcc gcc-c++
wget -c ${Nginx_URL}/${Nginx_File}
tar zxf ${Nginx_File}
cd ${Nginx_File_Dir}
# Hide the version string in the built binary
sed -i 's/1.18.0/ /;s/nginx\//nginx/' src/core/nginx.h
useradd -s /sbin/nologin www
./configure --prefix=${Nginx_Dir} \
--user=www \
--group=www \
--with-http_ssl_module \
--with-http_stub_status_module \
--with-stream
if [ $? -eq 0 ];then
make -j$(nproc) && make install
echo -e "\033[32mThe Nginx Install Success...\033[0m"
else
echo -e "\033[31mThe Nginx Install Failed...\033[0m"
exit 1
fi
else
echo -e "\033[31mThe Nginx already Install...\033[0m"
exit 1
fi

# Configure Nginx
ln -sf ${Nginx_Dir}/sbin/nginx /usr/sbin
cat >${Nginx_Dir}/conf/nginx.conf <<EOF
user www www;
worker_processes auto;
pid /usr/local/nginx/logs/nginx.pid;
events {
    use epoll;
    worker_connections 10240;
    multi_accept on;
}
http {
    include mime.types;
    default_type application/octet-stream;
    log_format json '{"@timestamp":"\$time_iso8601",'
        '"host":"\$server_addr",'
        '"clientip":"\$remote_addr",'
        '"remote_user":"\$remote_user",'
        '"request":"\$request",'
        '"http_user_agent":"\$http_user_agent",'
        '"size":\$body_bytes_sent,'
        '"responsetime":\$request_time,'
        '"upstreamtime":"\$upstream_response_time",'
        '"upstreamhost":"\$upstream_addr",'
        '"http_host":"\$host",'
        '"requesturi":"\$request_uri",'
        '"url":"\$uri",'
        '"domain":"\$host",'
        '"xff":"\$http_x_forwarded_for",'
        '"referer":"\$http_referer",'
        '"status":"\$status"}';
    access_log logs/access.log json;
    error_log logs/error.log warn;
    sendfile on;
    tcp_nopush on;
    keepalive_timeout 120;
    tcp_nodelay on;
    server_tokens off;
    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 64k;
    gzip_http_version 1.1;
    gzip_comp_level 4;
    gzip_types text/plain application/x-javascript text/css application/xml;
    gzip_vary on;
    client_max_body_size 10m;
    client_body_buffer_size 128k;
    proxy_connect_timeout 90;
    proxy_send_timeout 90;
    proxy_buffer_size 4k;
    proxy_buffers 4 32k;
    proxy_busy_buffers_size 64k;
    large_client_header_buffers 4 4k;
    client_header_buffer_size 4k;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 1;
    server {
        listen 80;
        server_name localhost;
        location / {
            proxy_pass http://${IPADDR}:5601;
            proxy_set_header Host \$host;
            proxy_set_header X-Real-IP \$remote_addr;
            proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        }
    }
}
EOF

# Create the systemd unit file
cat >/usr/lib/systemd/system/nginx.service <<EOF
[Unit]
Description=Nginx Server
Documentation=http://nginx.org/en/docs/
After=network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=${Nginx_Dir}/logs/nginx.pid
ExecStart=${Nginx_Dir}/sbin/nginx -c ${Nginx_Dir}/conf/nginx.conf
ExecReload=/bin/kill -s HUP \$MAINPID
ExecStop=/bin/kill -s TERM \$MAINPID

[Install]
WantedBy=multi-user.target
EOF

# Start Nginx
systemctl daemon-reload
systemctl enable nginx
systemctl start nginx

# Configure Filebeat
cat >${Filebeat_Dir}/filebeat.yml <<EOF
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - ${Nginx_Dir}/logs/access.log
  multiline:
    pattern: '^\d{4}-\d{1,2}-\d{1,2}\s\d{1,2}:\d{1,2}:\d{1,2}'
    negate: true
    match: after
  fields:
    log_topics: nginx_access-log
    logtype: nginx_access
- type: log
  enabled: true
  paths:
    - ${Nginx_Dir}/logs/error.log
  multiline:
    pattern: '^\d{4}-\d{1,2}-\d{1,2}\s\d{1,2}:\d{1,2}:\d{1,2}'
    negate: true
    match: after
  fields:
    log_topics: nginx_error-log
    logtype: nginx_error
output.kafka:
  enabled: true
  hosts: ["${Kafka_IP}:9092"]
  topic: '%{[fields][log_topics]}'
EOF
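# Note (added for clarity): each Filebeat input tags its events with a
# fields.log_topics value, and output.kafka publishes to '%{[fields][log_topics]}',
# so access and error logs land in separate Kafka topics, which the Logstash
# pipeline configured below consumes.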

# Configure Logstash
cat >${Logstash_Dir}/config/nginx.conf <<EOF
input {
  kafka {
    bootstrap_servers => "${Kafka_IP}:9092"
    group_id => "logstash-group"
    topics => ["nginx_access-log","nginx_error-log"]
    auto_offset_reset => "latest"
    consumer_threads => 5
    decorate_events => true
    codec => json
  }
}

filter {
  if [fields][logtype] == "nginx_access" {
    json {
      source => "message"
    }
    grok {
      match => { "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level}" }
    }
    date {
      match => ["timestamp", "yyyy-MM-dd HH:mm:ss,SSS"]
      target => "@timestamp"
    }
  }
  if [fields][logtype] == "nginx_error" {
    json {
      source => "message"
    }
    grok {
      match => { "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level}" }
    }
    date {
      match => ["timestamp", "yyyy-MM-dd HH:mm:ss,SSS"]
      target => "@timestamp"
    }
  }
}

output {
  if [fields][logtype] == "nginx_access" {
    elasticsearch {
      hosts => ["${Elasticsearch_IP}:9200"]
      user => "${Elasticsearch_User}"
      password => "${Elasticsearch_Passwd}"
      action => "index"
      index => "nginx_access.log-%{+YYYY.MM.dd}"
    }
  }
  if [fields][logtype] == "nginx_error" {
    elasticsearch {
      hosts => ["${Elasticsearch_IP}:9200"]
      user => "${Elasticsearch_User}"
      password => "${Elasticsearch_Passwd}"
      action => "index"
      index => "nginx_error.log-%{+YYYY.MM.dd}"
    }
  }
}
EOF
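# Note (added for clarity): the [fields][logtype] values set in filebeat.yml
# drive the conditionals above, routing each log type to its own daily index
# (nginx_access.log-YYYY.MM.dd and nginx_error.log-YYYY.MM.dd).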

# Create the Filebeat log directory
[ -d ${Filebeat_Dir}/logs ] || mkdir ${Filebeat_Dir}/logs

# Grant the elk user ownership of Filebeat and Logstash
chown -R ${User}.${User} ${Filebeat_Dir}
chown -R ${User}.${User} ${Logstash_Dir}

# Start Filebeat
su ${User} -c "cd ${Filebeat_Dir} && nohup ./filebeat -e -c filebeat.yml >>${Filebeat_Dir}/logs/filebeat.log 2>&1 &"

# Start Logstash
su ${User} -c "cd ${Logstash_Dir}/bin && nohup ./logstash -f ${Logstash_Dir}/config/nginx.conf >/dev/null 2>&1 &"

# Wait until Logstash is up before finishing
Code=""
while sleep 10
do
echo -e "\033[32m$(date +'%F %T') Waiting for the Logstash service to start...\033[0m"
# Check the Logstash service port
netstat -lntup |grep "9600" >/dev/null
if [ $? -eq 0 ];then
Code="break"
fi
${Code}
done

echo -e "\033[32mThe ELK logging platform is ready. Open http://${IPADDR} in a browser. Username: elastic Password: www.yangxingzhen.com\033[0m"

To run the script:

[root@localhost ~]# sh install_elk_filebeat_kafka.sh
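Once the script reports success, a few quick checks can confirm the pipeline end to end (a sketch, assuming the default variables above; replace <server-IP> with the host's address, i.e. the script's ${IPADDR}):

[root@localhost ~]# curl -u elastic:www.yangxingzhen.com http://<server-IP>:9200/_cat/indices?v

[root@localhost ~]# /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server <server-IP>:9092 --list

[root@localhost ~]# curl -s -o /dev/null -w "%{http_code}\n" http://<server-IP>:5601/login

The first command lists the Elasticsearch indices (nginx_access.log-* and nginx_error.log-* appear once Nginx has served some traffic), the second lists the Kafka topics created by Filebeat, and the third should print 200 once Kibana is up.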

Screenshots of the script execution process (images omitted).
