Deploying ELK with Docker

1. Prepare the images

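The images can be pulled up front — a minimal sketch, with versions taken from the run commands used later in this guide:

bash
docker pull elasticsearch:8.17.4
docker pull logstash:8.17.4
docker pull kibana:8.17.4
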
2. Create the Elasticsearch container

2.1 Start the Elasticsearch container

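The run command below bind-mounts host directories over the container's data and config paths and runs as UID 1000. The config mount only works if /elkdata/config already holds a valid Elasticsearch configuration, so seed the host directories first — a sketch assuming the paths used in the run command:

bash
# One-off container used only to copy the default config out of the image
docker create --name es-seed elasticsearch:8.17.4
mkdir -p /elkdata/data /elkdata/config
docker cp es-seed:/usr/share/elasticsearch/config/. /elkdata/config/
docker rm es-seed
# The container runs as UID/GID 1000, which must own the mounted directories
chown -R 1000:1000 /elkdata
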
bash
# Create the shared Docker network first; Logstash and Kibana join it later,
# so the hostname "elasticsearch" resolves between containers
docker network create elknet

docker run -d --name elasticsearch \
  -e "discovery.type=single-node" \
  -e "bootstrap.memory_lock=true" \
  -e "ES_JAVA_OPTS=-Xms2g -Xmx2g" \
  -e "xpack.security.enabled=true" \
  -p 9200:9200 \
  -v /elkdata/data:/usr/share/elasticsearch/data \
  -v /elkdata/config:/usr/share/elasticsearch/config \
  --ulimit memlock=-1:-1 \
  --user "1000:1000" \
  --net elknet \
  elasticsearch:8.17.4

2.2 Access Elasticsearch

Open http://192.168.110.83:9200/ in a browser. Note that with xpack.security.enabled=true, Elasticsearch 8.x requires authentication (e.g., the elastic user's password) and may serve HTTPS instead of plain HTTP, depending on whether TLS was auto-configured on first start.
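
A quick command-line check — this plain-HTTP call assumes the endpoint is reachable without TLS; with security enabled you would typically need -u elastic:<password> and https:
bash
curl http://192.168.110.83:9200/
# A healthy node answers with a JSON document containing the cluster name and version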

3. Create the Logstash container

3.1 Create the configuration file

bash
mkdir -p /elkdata/logconfig/
cd /elkdata/logconfig/
vim logstash.conf
conf
input {
  beats {
    port => 5044
    ssl => false
    tags => ["beats_input"]
  }
}

filter {
 # fingerprint {
 #   source => ["@timestamp", "host", "message"]
 #   method => "MURMUR3"  # a more efficient hash algorithm
 #   target => "[@metadata][fingerprint]"
 #   key => "${DEDUPLICATION_KEY}"  # inject the key via an environment variable
 # }

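  # The grok pattern below expects lines shaped like this hypothetical example:
  #   2025-04-01T12:00:00.123 ERROR something went wrong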
  grok {
    match => { 
      "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:loglevel} %{GREEDYDATA:message}" 
    }
    tag_on_failure => ["_grokparsefailure"]
  }

  date {
    match => ["timestamp", "ISO8601"]
    timezone => "Asia/Shanghai"  # force a consistent timezone
  }
}

output {
  if [fields][log_type] == "appkf" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appkf-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      retry_on_conflict => 5  # only relevant for update actions, not "create"
      # ssl_certificate_verification => false
      # Note: the dead letter queue is enabled in logstash.yml
      # (dead_letter_queue.enable: true), not as an option of this plugin
    }
  }
  else if [fields][log_type] == "appcs" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appcs-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else if [fields][log_type] == "appsc" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appsc-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "unknown-%{+YYYY.MM.dd}"
      # ssl_certificate_verification => false
    }
  }

  # Debug output for development
  if "_grokparsefailure" in [tags] {
    stdout { codec => rubydebug }
  }
}

3.2 Run the Logstash container

bash
# Create the network (if it was not already created in section 2.1)
docker network create elknet
bash
docker run -d \
  --name logstash \
  -p 5044:5044 \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  --net elknet \
  logstash:8.17.4

3.3 Verify that Logstash started correctly

bash
docker logs -f logstash
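
Beyond tailing the logs, the pipeline file can be syntax-checked with Logstash's built-in config test — a sketch reusing the same bind mount:
bash
docker run --rm \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  logstash:8.17.4 \
  logstash -f /usr/share/logstash/pipeline/logstash.conf --config.test_and_exit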

4. Create the Kibana container

4.1 Run the Kibana container

bash
# Kibana joins the elknet network so the hostname "elasticsearch" resolves
# (the legacy --link flag only works on the default bridge network)
docker run -d \
  --name kibana \
  -p 5601:5601 \
  -e "ELASTICSEARCH_HOSTS=http://elasticsearch:9200" \
  --net elknet \
  kibana:8.17.4

4.2 Verify that Kibana started correctly

bash
docker logs -f kibana

4.3 Access Kibana

http://192.168.110.83:5601/
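
Kibana also exposes a status API that can be probed from the command line (endpoint path as in current Kibana releases):
bash
curl http://192.168.110.83:5601/api/status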

5. Configure Kibana

5.1 Generate test data

test.py

python
from elasticsearch import Elasticsearch
from datetime import datetime

# Connect to Elasticsearch
es = Elasticsearch(
    ["http://192.168.110.83:9200"]
)

# Define the log document
log_data = {
    "timestamp": datetime.now(),
    "level": "ERROR",
    "message": "This is a test log message from Python"
}

# Index the document into Elasticsearch
response = es.index(index="test-logs", document=log_data)
print(response)

Run the following commands:

bash
pip install elasticsearch
python test.py
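
To confirm the document actually landed in the index, query it back — a quick check, assuming the endpoint is reachable without authentication:
bash
curl 'http://192.168.110.83:9200/test-logs/_search?pretty'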

5.2 Configure the log view

Click Discover.

Create a data view like the following:

6. Use Filebeat as the log collector

6.1 Install Filebeat on all target servers

bash
# Import the GPG key
sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

# Create the repository config file
sudo tee /etc/yum.repos.d/elastic.repo <<EOF
[elasticsearch-8.x]
name=Elasticsearch repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
bash
sudo yum update -y
sudo yum install filebeat -y

6.2 Configure Filebeat

Create a new YAML file under /etc/filebeat/ (named zyjkf.yml here, matching the systemd unit configuration below):

yml
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /www/wwwroot/zyj_sdev/logs/[0-9][0-9][0-9][0-9]-*/*.log  # log file path pattern
    close_inactive: 2h       # longer than the log rotation interval
    ignore_older: 72h        # ignore files older than 3 days
    scan_frequency: 30s      # balance scan frequency against performance
    clean_inactive: 168h     # drop registry entries after 7 days
    fields:
      log_type: "appkf"  # add a custom field
output.logstash:
  hosts: ["192.168.110.83:5044"]  # points at the Logstash server
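
Before starting the service, Filebeat can validate both the config file and the connection to Logstash using its built-in test subcommands (the file name matches the systemd unit below):
bash
filebeat test config -c /etc/filebeat/zyjkf.yml
filebeat test output -c /etc/filebeat/zyjkf.yml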

Edit /usr/lib/systemd/system/filebeat.service:

ini
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
#Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/zyjkf.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target

This points the BEAT_CONFIG_OPTS environment variable at the new YAML file.
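
After editing the unit file, reload systemd so the change takes effect:
bash
sudo systemctl daemon-reload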

6.3 Start Filebeat

bash
systemctl enable filebeat
systemctl start filebeat
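
To confirm the collector is running and shipping:
bash
systemctl status filebeat
journalctl -u filebeat -f   # follow Filebeat's own logs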

7. Deploy ELK with Docker Compose

yml
version: '3.8'

services:
  elasticsearch:
    image: elasticsearch:8.17.4
    container_name: elasticsearch
    user: "1000:1000"  
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - xpack.security.enabled=false  # note: unlike section 2.1, security is disabled here
      - ES_JAVA_OPTS=-Xms2g -Xmx2g
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elkdata/data:/usr/share/elasticsearch/data
      - /elkdata/config:/usr/share/elasticsearch/config
    ports:
      - "9200:9200"
    networks:
      - elk

  logstash:
    image: logstash:8.17.4
    container_name: logstash
    environment:
      - config.reload.automatic=true
      - config.reload.interval=3s
    volumes:
      - /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "9600:9600"
      - "5044:5044"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    image: kibana:8.17.4
    container_name: kibana
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=1c587411189db344f41a0cd98bc4e778
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge
bash
docker compose up -d
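
To verify the stack after bringing it up:
bash
docker compose ps                      # all three services should be "running"
docker compose logs -f elasticsearch   # watch startup logs for errors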