1. Prepare the images
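This walkthrough uses the 8.17.4 images throughout, so pull them ahead of time; the later docker run commands then start immediately:
bash
docker pull elasticsearch:8.17.4
docker pull logstash:8.17.4
docker pull kibana:8.17.4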

2. Create the Elasticsearch container
2.1 Start the Elasticsearch container
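The bind mounts below assume /elkdata/data and /elkdata/config exist, are owned by UID 1000, and that the config directory is not empty (mounting an empty directory over the config path would leave Elasticsearch without its default configuration files). A minimal preparation sketch, assuming tar is available inside the image:
bash
mkdir -p /elkdata/data /elkdata/config
# Copy the image's default config into the bind mount so it is not empty
docker run --rm --entrypoint tar elasticsearch:8.17.4 \
  cf - -C /usr/share/elasticsearch config | tar xf - -C /elkdata
chown -R 1000:1000 /elkdata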
bash
# Create a shared network first so Logstash and Kibana can reach Elasticsearch by name
docker network create elknet

# Security is disabled here for this test environment (matching the Compose file
# in section 7); enable it together with TLS and credentials in production.
docker run -d --name elasticsearch \
  --net elknet \
  -e "discovery.type=single-node" \
  -e "bootstrap.memory_lock=true" \
  -e "ES_JAVA_OPTS=-Xms2g -Xmx2g" \
  -e "xpack.security.enabled=false" \
  -p 9200:9200 \
  -v /elkdata/data:/usr/share/elasticsearch/data \
  -v /elkdata/config:/usr/share/elasticsearch/config \
  --ulimit memlock=-1:-1 \
  --user "1000:1000" \
  elasticsearch:8.17.4
2.2 Access Elasticsearch
Open http://192.168.110.83:9200/ in a browser; Elasticsearch should answer with its cluster information as JSON.
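The same check works from the command line:
bash
curl http://192.168.110.83:9200
curl "http://192.168.110.83:9200/_cluster/health?pretty"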
3. Create the Logstash container
3.1 Create the configuration file
bash
mkdir -p /elkdata/logconfig/
cd /elkdata/logconfig/
vim logstash.conf
conf
input {
  beats {
    port => 5044
    ssl => false
    tags => ["beats_input"]
  }
}

filter {
  # fingerprint {
  #   source => ["@timestamp", "host", "message"]
  #   method => "MURMUR3"                  # fast non-cryptographic hash
  #   target => "[@metadata][fingerprint]"
  #   key => "${DEDUPLICATION_KEY}"        # inject the key via an environment variable
  # }
  grok {
    match => {
      "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:loglevel} %{GREEDYDATA:message}"
    }
    overwrite => ["message"]      # replace the raw message instead of appending to it
    tag_on_failure => ["_grokparsefailure"]
  }
  date {
    match => ["timestamp", "ISO8601"]
    timezone => "Asia/Shanghai"   # force a consistent time zone
  }
}

output {
  if [fields][log_type] == "appkf" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appkf-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      # retry_on_conflict applies only to update actions, so it is not set here
      # ssl_certificate_verification => false
      # The dead letter queue is enabled in logstash.yml
      # (dead_letter_queue.enable: true), not in this output block
    }
  }
  else if [fields][log_type] == "appcs" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appcs-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else if [fields][log_type] == "appsc" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appsc-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "unknown-%{+YYYY.MM.dd}"
      # ssl_certificate_verification => false
    }
  }

  # Debug output for development: print any event whose grok parse failed
  if "_grokparsefailure" in [tags] {
    stdout { codec => rubydebug }
  }
}
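Before running the container, you can have Logstash validate the pipeline syntax; a quick sketch that reuses the image and the same bind mount:
bash
docker run --rm \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  --entrypoint /usr/share/logstash/bin/logstash \
  logstash:8.17.4 \
  -f /usr/share/logstash/pipeline/logstash.conf --config.test_and_exit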
3.2 Run the Logstash container
bash
# Create the network (if it was not created earlier)
docker network create elknet
bash
docker run -d --name logstash \
  -p 5044:5044 \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  --net elknet \
  logstash:8.17.4
3.3 Verify that Logstash started correctly
bash
docker logs -f logstash
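Once the logs show the pipeline is running, check from a shipper host that the Beats port accepts connections (a netcat sketch; substitute your Logstash host's IP):
bash
nc -zv 192.168.110.83 5044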
4. Create the Kibana container
4.1 Run the Kibana container
bash
# Joins elknet so the elasticsearch hostname resolves (--link is deprecated)
docker run -d --name kibana \
  -p 5601:5601 \
  -e "ELASTICSEARCH_HOSTS=http://elasticsearch:9200" \
  --net elknet \
  kibana:8.17.4
4.2 Verify that Kibana started correctly
bash
docker logs -f kibana
4.3 Access Kibana
Open http://192.168.110.83:5601 in a browser.
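Kibana also exposes a status endpoint that is convenient for scripted checks:
bash
curl http://192.168.110.83:5601/api/status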
5. Configure Kibana
5.1 Generate test data
python
from elasticsearch import Elasticsearch
from datetime import datetime

# Connect to Elasticsearch
es = Elasticsearch(["http://192.168.110.83:9200"])

# Define the log document
log_data = {
    "timestamp": datetime.now(),
    "level": "ERROR",
    "message": "This is a test log message from Python",
}

# Write it into an Elasticsearch index
response = es.index(index="test-logs", document=log_data)
print(response)
Save the script as test.py, install the client, and run it:
bash
pip install elasticsearch
python test.py
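To confirm the document was indexed, query the index directly:
bash
curl "http://192.168.110.83:9200/test-logs/_search?pretty"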
5.2 Configure a log view
In Kibana, click Discover, then create a data view like the one below (screenshots omitted); an index pattern of test-logs matches the test data written above.
6. Use Filebeat as the log collector
6.1 Install Filebeat on all target servers
bash
# Import the GPG key
sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
# Create the repository config file
sudo tee /etc/yum.repos.d/elastic.repo <<EOF
[elasticsearch-8.x]
name=Elasticsearch repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
bash
sudo yum update -y
sudo yum install filebeat -y
6.2 Configure Filebeat
Create a new YAML file under /etc/filebeat/ (this guide names it zyjkf.yml, matching the service unit edited below):
yml
filebeat.inputs:
  - type: log              # deprecated in 8.x in favor of filestream, but still functional
    enabled: true
    paths:
      - /www/wwwroot/zyj_sdev/logs/[0-9][0-9][0-9][0-9]-*/*.log  # log file locations
    close_inactive: 2h     # set longer than the log rotation interval
    ignore_older: 72h      # ignore files older than 3 days
    scan_frequency: 30s    # balance scan frequency against performance
    clean_inactive: 168h   # drop registry entries after 7 days
    fields:
      log_type: "appkf"    # custom tag, used for index routing in Logstash
output.logstash:
  hosts: ["192.168.110.83:5044"]  # points at the Logstash server
Edit /usr/lib/systemd/system/filebeat.service so that BEAT_CONFIG_OPTS points at the new file:
ini
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target
[Service]
UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
#Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/zyjkf.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always
[Install]
WantedBy=multi-user.target
The BEAT_CONFIG_OPTS environment line now points Filebeat at the new YAML file.
6.3 Start Filebeat
bash
# Reload systemd first, since the unit file was edited
systemctl daemon-reload
systemctl enable filebeat
systemctl start filebeat
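Confirm that the service is running and shipping logs:
bash
systemctl status filebeat
journalctl -u filebeat -f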
7. Deploy ELK with Docker Compose
The manual steps above can be combined into a single docker-compose.yml:
yml
version: '3.8'
services:
  elasticsearch:
    image: elasticsearch:8.17.4
    container_name: elasticsearch
    user: "1000:1000"
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - xpack.security.enabled=false
      - ES_JAVA_OPTS=-Xms2g -Xmx2g
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elkdata/data:/usr/share/elasticsearch/data
      - /elkdata/config:/usr/share/elasticsearch/config
    ports:
      - "9200:9200"
    networks:
      - elk
  logstash:
    image: logstash:8.17.4
    container_name: logstash
    environment:
      - config.reload.automatic=true
      - config.reload.interval=3s
    volumes:
      - /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "9600:9600"
      - "5044:5044"
    networks:
      - elk
    depends_on:
      - elasticsearch
  kibana:
    image: kibana:8.17.4
    container_name: kibana
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=1c587411189db344f41a0cd98bc4e778
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch
networks:
  elk:
    driver: bridge
bash
docker compose up -d
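Then check that all three containers came up:
bash
docker compose ps
docker compose logs -f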