Log Collection Example


Log collection workflow
Note: the shard-limit setting below is transient, so a full ES cluster restart clears it. After a restart, remember to re-apply it from the Kibana Dev Tools console (daily per-application indices add up to far more shards than the default per-node limit allows):

PUT /_cluster/settings
{
  "transient": {
    "cluster": {
      "max_shards_per_node":10000
    }
  }
}
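If Kibana is not handy, the same request can be sent with curl; a minimal sketch, where host, user, and password are placeholders rather than values from this cluster:

# Raise the per-node shard ceiling (transient, so it is lost on a full restart).
# localhost:9200 and elastic:changeme are placeholders for this sketch.
curl -u elastic:changeme -H 'Content-Type: application/json' \
  -X PUT 'http://localhost:9200/_cluster/settings' \
  -d '{"transient":{"cluster.max_shards_per_node":10000}}'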

Tomcat log collection

Filebeat config

[root@tomcat-prod_20 ~]# cd /data/work/filebeat-5.5.2/
[root@tomcat-prod_20 filebeat-5.5.2]# cat filebeat.yml 
filebeat.prospectors:
- input_type: log
  paths:
    - /data/WEBLOG/prod-ecommerce-app/catalina.out
  document_type: tykh_insurance_ecommerce-app_78pro
  multiline:
    # Lines that do not start with a date (e.g. Java stack-trace lines)
    # are appended to the preceding event.
    pattern: '^\d{4}(\-|\/|\.)\d{1,2}(\-|\/|\.)\d{1,2}'
    negate: true
    match: after
    max_lines: 100
    timeout: 3s
  fields:
    logtype: tykh_insurance_ecommerce-app_78pro
  tail_files: false
output.kafka:
  enabled: true
  hosts: ["10.100.20.1xx:9092","10.100.20.1x1:9092","10.100.20.1x2:9092"]
  topic: tykh-140
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1
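To confirm events are actually reaching the broker, the topic can be tailed with the console consumer that ships with Kafka; a quick sketch, where the Kafka install path is an assumption:

# Tail the topic Filebeat writes to; JSON events should scroll by.
# /opt/kafka is a placeholder for the actual Kafka install directory.
/opt/kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server 10.100.20.1xx:9092 \
  --topic tykh-140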

Logstash config

[root@localhost conf.d]# cat insurace-140.conf 
input {
    kafka {
        bootstrap_servers => ["10.100.20.1xx:9092,10.100.20.1x1:9092,10.100.20.1x2:9092"]
        topics => ["tykh-140"]
        codec => "json"
        consumer_threads => 1
        #auto_offset_reset => "earliest"
        auto_offset_reset => "latest"
        group_id => "tykh-140"
        decorate_events => true
        max_partition_fetch_bytes => "52428700"
        max_poll_records => "200"
        session_timeout_ms => "50000"
        request_timeout_ms => "510000"
        heartbeat_interval_ms => "1000"
    }
}
filter {
    grok {
        patterns_dir => [ "/etc/logstash/patterns.d" ]
        # Three patterns are tried in order, covering the application's log
        # layouts: with a trace id, without one, and with a leading TID field.
        match => [ "message", "%{TIMESTAMP_ISO8601:log_time}\s+\[%{THREADID:threadId}\]\s+\[%{THREADNAME:traceid}\]\s+%{LOGLEVEL:level}\s+%{JAVACLASS:javaclass}\s+\-\s+%{JAVAMESSAGE:javameassage}","message", "%{TIMESTAMP_ISO8601:log_time}\s+\[%{THREADID_1:threadId}\]\s+%{LOGLEVEL:level}\s+%{JAVACLASS:javaclass}\s+\-\s+%{JAVAMESSAGE:javameassage}","message","%{TIMESTAMP_ISO8601:log_time}\s+%{TID:TID}\s+\[%{THREADID_1:threadId}\]\s+%{LOGLEVEL:level}\s+%{JAVACLASS:javaclass}\s+\-\s+%{JAVAMESSAGE:javameassage}"]
        remove_field => [ "message","beat","timestamp","topic","hostname","name","index","host","tags"]
    }
    ruby {
        # Intended to render @timestamp in the local time zone.
        code => "event.timestamp.time.localtime"
    }
    date { match => ["log_time","yyyy-MM-dd HH:mm:ss.SSS"] }
}

output {
    if [fields][logtype] == "tykh_insurance_ecommerce-app_78pro" {
        elasticsearch {
            hosts => ["10.100.20.1xx:9200","10.100.20.1xx:9200","10.100.20.1x8:9200"]
            index => "tykh_insurance_ecommerce-app_78pro%{+YYYY-MM-dd}"
            user => elasxxx
            password => "elasticsearcxxx"
        }
        stdout { codec => rubydebug }
    }
}
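The grok patterns THREADID, THREADID_1, THREADNAME, TID, and JAVAMESSAGE are custom definitions loaded from /etc/logstash/patterns.d, a file the post never shows. A plausible sketch of such a pattern file follows; these regexes are assumptions, not the original definitions:

# /etc/logstash/patterns.d/java -- hypothetical reconstruction; the original
# pattern file is not shown in this post.
THREADID [^\]]+
THREADID_1 [^\]]+
THREADNAME [^\]]*
TID TID:\s*\S+
JAVAMESSAGE (.|\r|\n)*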

k8s logs (run from Jenkins)

[root@insurace-24 ~]# cat /root/docker/scripts/install_logstash.sh
#!/bin/bash
# Render the Logstash ConfigMap/Deployment templates for one application and
# apply them on the k8s master. Arguments: app, topics_pattern, profile, project.
confpath=~/docker/scripts/conf
repo=harborxx.reg/pre_jinfu
app=$1
topics_pattern=$2
profile=$3
project=$4
master_host=10.100.24.xx
yaml_host=http://10.100.24.1x2:8889

cd $confpath
mkdir -p $app/$profile
echo "---logstash-configmap.yaml---"
# Substitute the #placeholders#, echo the result into the Jenkins log, then
# write it next to the templates so the master can fetch it over HTTP.
cat logstash-configmap-template.yaml | sed "s|#topics_pattern#|$topics_pattern|g" | sed "s|#project#|$project|g" | sed "s|#profile#|$profile|g"
cat logstash-configmap-template.yaml | sed "s|#topics_pattern#|$topics_pattern|g" | sed "s|#project#|$project|g" | sed "s|#profile#|$profile|g" > $app/$profile/logstash-configmap.yaml
echo "---logstash.yaml---"
cat logstash-template.yaml | sed "s|#topics_pattern#|$topics_pattern|g" | sed "s|#project#|$project|g" | sed "s|#profile#|$profile|g"
cat logstash-template.yaml | sed "s|#topics_pattern#|$topics_pattern|g" | sed "s|#project#|$project|g" | sed "s|#profile#|$profile|g" > $app/$profile/logstash.yaml
ssh $master_host "kubectl apply -f $yaml_host/$app/$profile/logstash-configmap.yaml && kubectl apply -f $yaml_host/$app/$profile/logstash.yaml"
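
The three piped sed calls can be collapsed into a single invocation, which also makes a local dry-run easy. A sketch, using the same argument values as the example run further down, rendering the template without applying anything:

# Dry-run: render the ConfigMap template locally, apply nothing.
topics_pattern=e-assessment-back
profile=profile-a
project=insurance
sed -e "s|#topics_pattern#|$topics_pattern|g" \
    -e "s|#project#|$project|g" \
    -e "s|#profile#|$profile|g" logstash-configmap-template.yaml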

logstash-template.yaml

[root@insurace-24 conf]# cat logstash-template.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-#topics_pattern#-#profile#
  namespace: default
spec:
  selector:
    matchLabels:
      app: logstash-#topics_pattern#-#profile#
  template:
    metadata:
      labels:
        app: logstash-#topics_pattern#-#profile#
    spec:
      containers:
      - name: logstash-#topics_pattern#-#profile#
        image: harborxx.reg/library/logstash:7.6.2.1
        imagePullPolicy: IfNotPresent
        command:
        - logstash
        - '-f'
        - '/etc/logstash_c/logstash-#project#-#topics_pattern#-#profile#.conf'
        volumeMounts:
        - name: config-volume
          mountPath: /etc/logstash_c/
        resources:
          limits:
            cpu: 1000m
            memory: 1348Mi
      volumes:
      - name: config-volume
        configMap:
          name: logstash-#project#-#topics_pattern#-#profile#
          items:
          - key: logstash-#project#-#topics_pattern#-#profile#.conf
            path: logstash-#project#-#topics_pattern#-#profile#.conf

Example run (from Jenkins) and its output:

/root/docker/scripts/install_logstash.sh prodpipeline-assessment-back e-assessment-back profile-a insurance
---logstash-configmap.yaml---
kind: ConfigMap
apiVersion: v1
metadata:
  name: logstash-insurance-e-assessment-back-profile-a
  namespace: default
data:
  logstash-insurance-e-assessment-back-profile-a.conf: |
   input {
    kafka {
        bootstrap_servers => ["10.100.24.xx:9092"]
        topics_pattern  => "e-assessment-back.*"
        codec => "json"
        consumer_threads => 5
        auto_offset_reset => "latest"
        group_id => "e-assessment-back"
        client_id => "e-assessment-back"
        decorate_events => true
        #auto_commit_interval_ms => 5000
        }
    }

    filter {
      json {
        source => "message"
      }
      date {
        match => [ "timestamp" ,"dd/MMM/YYYY:HH:mm:ss Z" ]
      }
      mutate {
        remove_field => "timestamp"
      }
      if "_geoip_lookup_failure" in [tags] { drop { } }
    }

    output {
      elasticsearch {
        hosts => ["10.100.24.xx:9200"] 
        index => "logstash-insurance-e-assessment-back-%{+YYYY-MM-dd}"
        user => elastic
        password => "Elasticsearch_Insuance24*#"
      }
    stdout { codec => rubydebug }
   }
---logstash.yaml---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-e-assessment-back-profile-a
  namespace: default
spec:
  selector:
    matchLabels:
      app: logstash-e-assessment-back-profile-a
  template:
    metadata:
      labels:
        app: logstash-e-assessment-back-profile-a
    spec:
      containers:
      - name: logstash-e-assessment-back-profile-a
        image: harborxx.reg/library/logstash:7.6.2.1
        imagePullPolicy: IfNotPresent
        command:
        - logstash
        - '-f'
        - '/etc/logstash_c/logstash-insurance-e-assessment-back-profile-a.conf'
        volumeMounts:
        - name: config-volume
          mountPath: /etc/logstash_c/
        resources:
          limits:
            cpu: 1000m
            memory: 1348Mi
      volumes:
      - name: config-volume
        configMap:
          name: logstash-insurance-e-assessment-back-profile-a
          items:
          - key: logstash-insurance-e-assessment-back-profile-a.conf
            path: logstash-insurance-e-assessment-back-profile-a.conf
configmap/logstash-insurance-e-assessment-back-profile-a created
deployment.apps/logstash-e-assessment-back-profile-a created
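
After the apply succeeds, the new consumer can be checked with standard kubectl commands, using the label and deployment names from the objects created above:

# Confirm the pod is running, then watch it consume from Kafka.
kubectl get pods -l app=logstash-e-assessment-back-profile-a
kubectl logs -f deployment/logstash-e-assessment-back-profile-a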
