Deploying ELK with Docker

1. Prepare the images
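
This guide uses the 8.17.4 tags throughout; pulling the three images up front avoids long waits in the later steps:

```bash
docker pull elasticsearch:8.17.4
docker pull logstash:8.17.4
docker pull kibana:8.17.4
```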

2. Create the Elasticsearch container

2.1 Start the Elasticsearch container

```bash
# Create a user-defined network first so the containers can resolve
# each other by name (Logstash and Kibana join it below)
docker network create elknet

docker run -d --name elasticsearch \
  -e "discovery.type=single-node" \
  -e "bootstrap.memory_lock=true" \
  -e "ES_JAVA_OPTS=-Xms2g -Xmx2g" \
  -e "xpack.security.enabled=true" \
  -p 9200:9200 \
  -v /elkdata/data:/usr/share/elasticsearch/data \
  -v /elkdata/config:/usr/share/elasticsearch/config \
  --ulimit memlock=-1:-1 \
  --user "1000:1000" \
  --net elknet \
  elasticsearch:8.17.4
```
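
The two bind mounts assume the host paths exist and are writable by UID 1000; /elkdata/config in particular must hold a complete Elasticsearch config, or the container will exit on startup. A minimal sketch for seeding it from a throwaway container (the es-tmp name is just an example):

```bash
# Create (but don't start) a container just to copy the default config out
docker create --name es-tmp elasticsearch:8.17.4
docker cp es-tmp:/usr/share/elasticsearch/config /elkdata/
docker rm es-tmp

# Make both mount points writable by the container user
mkdir -p /elkdata/data
chown -R 1000:1000 /elkdata
```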

2.2 Access Elasticsearch

Open the Elasticsearch server at http://192.168.110.83:9200/. With xpack.security.enabled=true, the server will ask for credentials.
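
With security enabled, anonymous requests return 401. If no password was set via the ELASTIC_PASSWORD environment variable when the container started, one can be generated with the stock 8.x tool and then used with curl; a sketch (the password below is a placeholder):

```bash
# Generate a new password for the built-in elastic user
docker exec -it elasticsearch \
  /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic

# Query the node with the generated credentials
curl -u elastic:<password> http://192.168.110.83:9200/
```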

3. Create the Logstash container

3.1 Create the configuration file

```bash
mkdir -p /elkdata/logconfig/
cd /elkdata/logconfig/
vim logstash.conf
```

```conf
input {
  beats {
    port => 5044
    ssl => false
    tags => ["beats_input"]
  }
}

filter {
  # fingerprint {
  #   source => ["@timestamp", "host", "message"]
  #   method => "MURMUR3"  # more efficient hash algorithm
  #   target => "[@metadata][fingerprint]"
  #   key => "${DEDUPLICATION_KEY}"  # inject the key via an environment variable
  # }

  grok {
    match => {
      "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:loglevel} %{GREEDYDATA:message}"
    }
    overwrite => ["message"]  # replace message instead of turning it into an array
    tag_on_failure => ["_grokparsefailure"]
  }

  date {
    match => ["timestamp", "ISO8601"]
    timezone => "Asia/Shanghai"  # force a consistent timezone
  }
}

output {
  if [fields][log_type] == "appkf" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appkf-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      # document_id => "%{[@metadata]}"
      action => "create"
      retry_on_conflict => 5
      # ssl_certificate_verification => false
      # dead_letter_queue_enable => true  # 启用死信队列[5](@ref)
      # dlq_path => "/var/logstash/dlq"
    }
  }
  else if [fields][log_type] == "appcs" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appcs-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      # document_id => "%{[@metadata]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else if [fields][log_type] == "appsc" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appsc-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      # document_id => "%{[@metadata]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "unknown-%{+YYYY.MM.dd}"
      # ssl_certificate_verification => false
    }
  }

  # Debug output for development
  if "_grokparsefailure" in [tags] {
    stdout { codec => rubydebug }
  }
}
```
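
Before starting the container, it is worth letting Logstash validate the pipeline syntax. A sketch using the same image (arguments starting with a dash are passed straight to the logstash binary; --config.test_and_exit checks the config and exits):

```bash
docker run --rm \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  logstash:8.17.4 \
  -f /usr/share/logstash/pipeline/logstash.conf --config.test_and_exit
```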

3.2 Run the Logstash container

```bash
# Create the network (skip if it already exists)
docker network create elknet
```

```bash
docker run -d --name logstash \
  -p 5044:5044 \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  --net elknet \
  logstash:8.17.4
```

3.3 Verify that Logstash started correctly

```bash
docker logs -f logstash
```
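
Beyond watching the logs, you can confirm the pipeline came up and that the Beats port is listening on the host; a quick sketch (ss comes with iproute2, netstat works too):

```bash
# Look for the pipeline-started messages
docker logs logstash 2>&1 | grep -i "pipeline"

# Confirm port 5044 is accepting connections
ss -ltn | grep 5044
```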

4. Create the Kibana container

4.1 Run the Kibana container

The legacy --link flag only resolves names on the default bridge network; since elasticsearch joined elknet above, Kibana joins the same network instead.

```bash
docker run -d --name kibana \
  -p 5601:5601 \
  -e "ELASTICSEARCH_HOSTS=http://elasticsearch:9200" \
  --net elknet \
  kibana:8.17.4
```

4.2 Verify that Kibana started correctly

```bash
docker logs -f kibana
```

4.3 Access Kibana

http://192.168.110.83:5601/ (with Elasticsearch security enabled, Kibana 8 will additionally ask for an enrollment token or service-account credentials on first start)

5. Configure Kibana

5.1 Generate test data

test.py

```python
from elasticsearch import Elasticsearch
from datetime import datetime

# Connect to Elasticsearch
# (if xpack.security.enabled=true, also pass basic_auth=("elastic", "<password>"))
es = Elasticsearch(
    ["http://192.168.110.83:9200"]
)

# Define the log entry
log_data = {
    "timestamp": datetime.now(),
    "level": "ERROR",
    "message": "This is a test log message from Python"
}

# Write to the Elasticsearch index
response = es.index(index="test-logs", document=log_data)
print(response)
```

Run the following commands:

```bash
pip install elasticsearch
python test.py
```
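
To confirm the document actually landed, query the index directly (add -u elastic:<password> if security is enabled):

```bash
curl "http://192.168.110.83:9200/test-logs/_search?pretty"
```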

5.2 Configure a log view

Click Discover.

Create a new data view for the test index (for example, index pattern test-logs):

6. Use Filebeat as the log collector

6.1 Install Filebeat on every target server

```bash
# Import the GPG key
sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

# Create the repository config file
sudo tee /etc/yum.repos.d/elastic.repo <<EOF
[elasticsearch-8.x]
name=Elasticsearch repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
```

```bash
sudo yum update -y
sudo yum install filebeat -y
```

6.2 Configure Filebeat

Create a new YAML file under /etc/filebeat/ (this guide uses /etc/filebeat/zyjkf.yml, which the systemd unit below references):

```yaml
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /www/wwwroot/zyj_sdev/logs/[0-9][0-9][0-9][0-9]-*/*.log  # log file paths
    close_inactive: 2h       # longer than the log rotation period
    ignore_older: 72h        # ignore files older than 3 days
    scan_frequency: 30s      # balance scan frequency against load
    clean_inactive: 168h     # purge registry entries after 7 days
    fields:
      log_type: "appkf"  # custom field; Logstash routes on [fields][log_type]
output.logstash:
  hosts: ["192.168.110.83:5044"]  # points at the Logstash server
```
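
Before starting Filebeat, a quick sanity check that the path glob actually matches real files:

```bash
# The shell expands the same glob Filebeat will use; errors mean no matches
ls /www/wwwroot/zyj_sdev/logs/[0-9][0-9][0-9][0-9]-*/*.log | head
```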

Edit /usr/lib/systemd/system/filebeat.service:

```ini
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
#Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/zyjkf.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
```

The BEAT_CONFIG_OPTS line is where the environment is repointed from the default filebeat.yml to the new YAML file.

6.3 Start Filebeat

```bash
systemctl daemon-reload   # pick up the edited unit file
systemctl enable filebeat
systemctl start filebeat
```
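
Filebeat also ships with built-in self-tests; with the custom config selected via -c, you can validate both the file itself and the connection to Logstash:

```bash
filebeat test config -c /etc/filebeat/zyjkf.yml
filebeat test output -c /etc/filebeat/zyjkf.yml
```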

7. Deploy ELK with Docker Compose

Save the following as docker-compose.yml:

```yaml
version: '3.8'

services:
  elasticsearch:
    image: elasticsearch:8.17.4
    container_name: elasticsearch
    user: "1000:1000"  
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - xpack.security.enabled=false  # note: security is disabled here, unlike the standalone run in 2.1
      - ES_JAVA_OPTS=-Xms2g -Xmx2g
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elkdata/data:/usr/share/elasticsearch/data
      - /elkdata/config:/usr/share/elasticsearch/config
    ports:
      - "9200:9200"
    networks:
      - elk

  logstash:
    image: logstash:8.17.4
    container_name: logstash
    environment:
      - config.reload.automatic=true
      - config.reload.interval=3s
    volumes:
      - /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "9600:9600"
      - "5044:5044"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    image: kibana:8.17.4
    container_name: kibana
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=1c587411189db344f41a0cd98bc4e778
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge
```

```bash
docker compose up -d
```
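
After the stack is up, check the status of the containers and tail a service's logs with the standard Compose subcommands:

```bash
docker compose ps
docker compose logs -f logstash
```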