17. ELK

Installing ELKFK with Helm (Kafka reachable from outside the cluster)

ES/Kibana <--- Logstash <--- Kafka <--- Filebeat

Deployment order:

1. elasticsearch

2. kibana

3. kafka

4. logstash

5. filebeat

shell:
kubectl create ns elk

Deploying ELKFK with Helm 3

1. elasticsearch
shell:
helm repo add elastic https://helm.elastic.co

helm repo list

helm repo update
shell:
helm search repo elastic/elasticsearch

cd && helm pull elastic/elasticsearch --untar --version 7.17.3

cd elasticsearch
shell:
cat > values-prod.yaml << EOF
# Cluster name
clusterName: "elasticsearch"
# Elasticsearch 6.8+ ships with the x-pack plugin by default (some features are free); we disable it here
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.17.3"
imagePullPolicy: "IfNotPresent"

esConfig:
  elasticsearch.yml: |
    network.host: 0.0.0.0
    cluster.name: "elasticsearch"
    xpack.security.enabled: false
resources:
  limits:
    cpu: "2"
    memory: "4Gi"
  requests:
    cpu: "1"
    memory: "2Gi"
volumeClaimTemplate:
  storageClassName: "nfs-storage"
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 2Ti
service:
  type: NodePort
  port: 9200
  nodePort: 31311
EOF

Setting xpack.security.enabled: false also suppresses the Kibana security banner ("Elasticsearch built-in security features are not enabled").

shell:
helm upgrade --install --namespace elk es -f ./values-prod.yaml .

Verification

shell:
curl 192.168.1.200:31311/_cat/health

curl 192.168.1.200:31311/_cat/nodes
2. kibana
shell:
helm search repo elastic/kibana

cd && helm pull elastic/kibana --untar --version 7.17.3

cd kibana
shell:
cat > values-prod.yaml << 'EOF'
kibanaConfig:
  kibana.yml: |
    server.port: 5601
    server.host: "0.0.0.0"
    elasticsearch.hosts: [ "http://elasticsearch-master-headless:9200" ]
    i18n.locale: "zh-CN"
resources:
  limits:
    cpu: "2"
    memory: "2Gi"
  requests:
    cpu: "1"
    memory: "1Gi"
service:
  #type: ClusterIP
  type: NodePort
  loadBalancerIP: ""
  port: 5601
  nodePort: "30026"
EOF
shell:
helm upgrade --install --namespace elk kibana -f ./values-prod.yaml .
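Once the chart is installed, a quick sanity check (a sketch, assuming the NodePort 30026 set above, any node IP, and the chart's default pod labels; Kibana 7.x exposes a status API):

shell:
kubectl -n elk get pods -l app=kibana

# An overall state of "available" means Kibana is ready
curl -s http://192.168.1.200:30026/api/status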
shell:
cat > ~/kibana/kibana-Ingress.yml << 'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: elk
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: 'true'
    nginx.ingress.kubernetes.io/proxy-body-size: '4G'
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: kibana-auth-secret
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - admin'
spec:
  ingressClassName: nginx
  rules:
  - host: kibana.huanghuanhui.cloud
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana-kibana
            port:
              number: 5601
  tls:
  - hosts:
    - kibana.huanghuanhui.cloud
    secretName: kibana-ingress-tls
EOF
shell:
yum -y install httpd-tools

cd ~/kibana && htpasswd -bc auth admin Admin@2024

kubectl create secret generic kibana-auth-secret --from-file=auth -n elk
shell:
kubectl create secret -n elk \
tls kibana-ingress-tls \
--key=/root/ssl/huanghuanhui.cloud.key \
--cert=/root/ssl/huanghuanhui.cloud.crt
shell:
kubectl apply -f ~/kibana/kibana-Ingress.yml 

Access URL: kibana.huanghuanhui.cloud

Username / password: admin / Admin@2024
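To verify the Ingress end to end (TLS and basic auth), a quick sketch from a machine that resolves the hostname; -k skips certificate validation in case the cert is self-signed:

shell:
# Without credentials the ingress should answer 401
curl -k -s -o /dev/null -w "%{http_code}\n" https://kibana.huanghuanhui.cloud

# With credentials, expect 200 (or a 302 redirect into Kibana)
curl -k -s -o /dev/null -w "%{http_code}\n" -u admin:Admin@2024 https://kibana.huanghuanhui.cloud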

shell:
http://192.168.1.201:30026/app/dev_tools#/console

GET _cat/nodes

GET _cat/health

GET _cat/indices
3. kafka (Kafka cluster on k8s, reachable from outside the cluster)
shell:
mkdir -p ~/kafka-yml && cd ~/kafka-yml
shell:
cat > ~/kafka-yml/zk.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-cluster
  namespace: elk
  name: zookeeper-cluster
spec:
  selector:
    app: zookeeper-cluster
  ports:
    - name: client
      port: 2181
      targetPort: 2181
    - name: follower
      port: 2888
      targetPort: 2888
    - name: leader
      port: 3888
      targetPort: 3888
  clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
  namespace: elk
  name: zookeeper-cs
spec:
  selector:
    app: zookeeper-cluster
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 30152
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: elk
  name: crs-zookeeper
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: zookeeper-cluster
  selector:
    matchLabels:
      app: zookeeper-cluster
  template:
    metadata:
      labels:
        component: zookeeper-cluster
        app: zookeeper-cluster
    spec:
      containers:
        - name: zookeeper
          image: bitnami/zookeeper:3.8.2
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
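          # Write the ZooKeeper myid (pod ordinal + 1) by parsing this pod's hostname out of /etc/hosts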
          lifecycle:
            postStart:
              exec:
                command:
                  - "sh"
                  - "-c"
                  - >
                    echo $(( $(cat /etc/hosts | grep zookeeper | awk '{print($3)}' | awk '{split($0,array,"-")} END{print array[3]}') + 1 )) > /bitnami/zookeeper/data/myid
          env:
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            - name: ZOO_SERVERS
              value:  crs-zookeeper-0.zookeeper-cluster.elk.svc.cluster.local:2888:3888,crs-zookeeper-1.zookeeper-cluster.elk.svc.cluster.local:2888:3888,crs-zookeeper-2.zookeeper-cluster.elk.svc.cluster.local:2888:3888
          volumeMounts:
            - name: zoodata-outer
              mountPath: /bitnami/zookeeper
  volumeClaimTemplates:
    - metadata:
        name: zoodata-outer
      spec:
        storageClassName: nfs-storage
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 2Ti
EOF
shell:
kubectl apply -f ~/kafka-yml/zk.yml
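Before moving on to Kafka, it is worth confirming the ensemble actually formed; a minimal sketch, assuming the pod names from the StatefulSet above:

shell:
kubectl -n elk get pods -l app=zookeeper-cluster

# One pod should report Mode: leader, the other two Mode: follower
for i in 0 1 2; do
  kubectl -n elk exec crs-zookeeper-$i -- zkServer.sh status
done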
shell:
cat > ~/kafka-yml/kafka.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  namespace: elk
  name: kafka-headless
spec:
  selector:
    app: kafka-cluster
  ports:
    - name: client
      port: 9092
      targetPort: 9092
  clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-0
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30127
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-0
#    app: kafka-cluster
 
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-1
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30128
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-1
 
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-2
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30129
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: elk
  name: crs-kafka
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: kafka-cluster
  selector:
    matchLabels:
      app: kafka-cluster
  template:
    metadata:
      labels:
        app: kafka-cluster
    spec:
      hostname: kafka
      containers:
        - name: kafka
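          # Compute this broker's NodePort (30127 + pod ordinal) from the hostname and
          # advertise it on the node/public IP so clients outside the cluster can connect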
          command:
            - bash
            - -ec
            - |
              HOSTNAME=`hostname -s`
              if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
                ORD=${BASH_REMATCH[2]}
                PORT=$((ORD + 30127))
                export KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://192.168.1.200:$PORT"
              else
                echo "Failed to get index from hostname $HOST"
                exit 1
              fi
              exec /entrypoint.sh /run.sh
          image: bitnami/kafka:3.5.1
          #        image: bitnami/kafka:latest
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 0
#          resources:
#            requests:
#              memory: "1G"
#              cpu: "0.5"
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_CFG_ZOOKEEPER_CONNECT
              value: crs-zookeeper-0.zookeeper-cluster.elk.svc.cluster.local:2181,crs-zookeeper-1.zookeeper-cluster.elk.svc.cluster.local:2181,crs-zookeeper-2.zookeeper-cluster.elk.svc.cluster.local:2181
            #          value: zookeeper-cluster:2181
            - name: ALLOW_PLAINTEXT_LISTENER
              value: "yes"
          volumeMounts:
            - name: kafkadata-outer
              mountPath: /bitnami/kafka
  volumeClaimTemplates:
    - metadata:
        name: kafkadata-outer
      spec:
        storageClassName: nfs-storage
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 2Ti
EOF
shell:
kubectl apply -f ~/kafka-yml/kafka.yml

Note: update the IP address in the export KAFKA_CFG_ADVERTISED_LISTENERS line of the manifest (line 98 of the yml above).

Here it should be the public IP 58.34.61.154 (internal IP 192.168.1.200).
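With the advertised listeners in place, external access can be smoke-tested from any machine with the Kafka CLI; a sketch reusing the bitnami image (the topic name smoke-test is arbitrary):

shell:
docker run --rm bitnami/kafka:3.5.1 kafka-topics.sh \
  --bootstrap-server 192.168.1.200:30127 \
  --create --topic smoke-test --partitions 3 --replication-factor 3

docker run --rm bitnami/kafka:3.5.1 kafka-topics.sh \
  --bootstrap-server 192.168.1.200:30127 \
  --describe --topic smoke-test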

Kafka UI

shell:
docker pull provectuslabs/kafka-ui:latest

docker pull freakchicken/kafka-ui-lite
shell:
docker run -d \
--name kafka-ui1 \
--restart always \
--privileged=true \
-p 8888:8080 \
-e KAFKA_CLUSTERS_0_NAME=k8s-kafka \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129 \
provectuslabs/kafka-ui:latest

Access URL: 192.168.1.200:8888

shell:
docker run -d \
--name kafka-ui2 \
--restart always \
--privileged=true \
-p 8889:8889 \
freakchicken/kafka-ui-lite

Access URL: 192.168.1.200:8889

4. filebeat

Option 1: Kubernetes

shell:
helm search repo elastic/filebeat

cd && helm pull elastic/filebeat --untar --version 7.17.3

cd filebeat
shell:
cat > values-prod.yaml << 'EOF'
daemonset:
  filebeatConfig:
    filebeat.yml: |
      filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log

      output.elasticsearch:
        enabled: false
        host: '${NODE_NAME}'
        hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
      output.kafka:
        enabled: true
        hosts: ["192.168.1.200:30127","192.168.1.200:30128","192.168.1.200:30129"]
        topic: k8s-logs
EOF
shell:
helm upgrade --install --namespace elk filebeat -f ./values-prod.yaml .
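To confirm logs are flowing, check the DaemonSet pods and tail the k8s-logs topic (a sketch; the label below assumes the chart's default naming for a release called filebeat):

shell:
kubectl -n elk get pods -l app=filebeat-filebeat

docker run --rm bitnami/kafka:3.5.1 kafka-console-consumer.sh \
  --bootstrap-server 192.168.1.200:30127 \
  --topic k8s-logs --from-beginning --max-messages 5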

Option 2: Docker

shell:
cat > filebeat.yml << 'EOF'
# Log input configuration (multiple inputs can be defined)
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /mnt/nfs/logs/*/*.log
  tags: ["dev-c"]
  fields:
    server: dev-c
  fields_under_root: true
# Log output configuration
output.kafka:
  enabled: true
  hosts: ["192.168.1.200:30127","192.168.1.200:30128","192.168.1.200:30129"]
  topic: "dev-c"
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
EOF
shell:
docker run -d --name filebeat \
--user=root \
--restart=always \
-v /mnt/nfs/logs/:/mnt/nfs/logs/ \
-v /root/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml \
-v /etc/localtime:/etc/localtime \
-v /etc/timezone:/etc/timezone \
elastic/filebeat:7.17.3
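If the container starts cleanly, events should begin arriving on the dev-c topic; a quick check (same consumer sketch as above):

shell:
docker logs --tail 20 filebeat

docker run --rm bitnami/kafka:3.5.1 kafka-console-consumer.sh \
  --bootstrap-server 192.168.1.200:30127 \
  --topic dev-c --from-beginning --max-messages 5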
5. logstash
shell:
helm search repo elastic/logstash

cd && helm pull elastic/logstash --untar --version 7.17.3

cd logstash
shell:
cat > values-prod.yaml << 'EOF'
logstashConfig:
  logstash.yml: |
    xpack.monitoring.enabled: false

logstashPipeline:
  logstash.conf: |
    input {
      kafka {
            bootstrap_servers => "192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129"
            topics => ["k8s-logs"]
            #group_id => "mygroup"
            # When using the Kafka metadata, do not enable the byte-array deserializers below, or it will error out
            #key_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
            #value_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
            consumer_threads => 1
            # Defaults to false; Kafka metadata is only available when set to true
            decorate_events => true
            auto_offset_reset => "earliest"
      }
    }
    filter {
      mutate {
        # Take the Kafka record key and split it on commas
        split => ["[@metadata][kafka][key]", ","]
        add_field => {
            # Put the first element of the split key into a custom "index" field
            "index" => "%{[@metadata][kafka][key][0]}"
        }
      }
    }
    output {
      elasticsearch {
          pool_max => 1000
          pool_max_per_route => 200
          hosts => ["elasticsearch-master-headless.elk.svc.cluster.local:9200"]
          index => "k8s-logs-%{+YYYY.MM.dd}"
      }
    }

# Resource limits
resources:
  requests:
    cpu: "100m"
    memory: "256Mi"
  limits:
    cpu: "1000m"
    memory: "1Gi"

persistence:
  enabled: true
  
volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  storageClassName: nfs-storage
  resources:
    requests:
      storage: 2Ti
EOF
shell:
helm upgrade --install --namespace elk logstash -f ./values-prod.yaml .
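After Logstash has been consuming for a minute or two, the daily index should appear in Elasticsearch (via the NodePort used earlier):

shell:
curl -s 192.168.1.200:31311/_cat/indices | grep k8s-logs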

Hand-written YAML (deploying Logstash without Helm)

shell:
mkdir -p ~/logstash-yml && cd ~/logstash-yml
cat > logstash.yaml << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-dev-configmap
  namespace: elk
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    input {
      kafka {
        bootstrap_servers => "192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129"
        topics => ["dev"]
        codec => "json"
        type => "dev"
        group_id => "dev"
        consumer_threads => 1
      }
    }
    filter {
        if [type] == "dev" {
            json {
                source => ["message"]
                remove_field => ["offset","host","beat","@version","event","agent","ecs"]
            }
            mutate {
                add_field => {
                project_path => "%{[log][file][path]}"
                }
            }
            mutate {
                split => ["project_path", "/"]
                add_field => {
                    "project_name" => "%{[project_path][-3]}"
                }
            }
            date {
                match => ["time","yyyy-MM-dd HH:mm:ss.SSS"]
                timezone => "Asia/Shanghai"
                target => "@timestamp"
            }
            mutate {
               remove_field => ["log","project_path","time","input"]
            }
        }
    }
    output {
        elasticsearch {
            hosts => ["elasticsearch-master-headless.elk.svc.cluster.local:9200"]
            index => "dev-%{+YYYY.MM.dd}"
        }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-dev
  namespace: elk
spec:
  selector:
    matchLabels:
      app: logstash-dev
  replicas: 1
  template:
    metadata:
      labels:
        app: logstash-dev
    spec:
      containers:
      - name: logstash-dev
        image: docker.elastic.co/logstash/logstash:7.17.3
        ports:
        - containerPort: 5044
        volumeMounts:
          - name: logstash-pipeline-volume
            mountPath: /usr/share/logstash/pipeline
          - mountPath: /etc/localtime
            name: localtime
      volumes:
      - name: logstash-pipeline-volume
        configMap:
          name: logstash-dev-configmap
          items:
            - key: logstash.conf
              path: logstash.conf
      - hostPath:
          path: /etc/localtime
        name: localtime
---
kind: Service
apiVersion: v1
metadata:
  name: logstash-dev
  namespace: elk
spec:
  selector:
    app: logstash-dev
  type: ClusterIP
  ports:
  - protocol: TCP
    port: 5044
    targetPort: 5044
EOF
shell:
kubectl apply -f logstash.yaml
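As a final check, confirm the pod is running and the dev-* index is being written (a sketch; index names follow the pattern in the output block above):

shell:
kubectl -n elk get pods -l app=logstash-dev

curl -s 192.168.1.200:31311/_cat/indices | grep dev-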