docker部署elk

一、准备镜像

二、创建Elasticsearch容器

2.1启动Elasticsearch容器

bash 复制代码
# Create the shared bridge network first so that the logstash and kibana
# containers can later reach this one by the hostname "elasticsearch".
docker network create elknet 2>/dev/null || true

# Single-node Elasticsearch 8.x with security enabled and memory locking.
# NOTE: /elkdata/data and /elkdata/config must exist on the host and be
# owned by uid:gid 1000:1000 (the --user below), or startup will fail.
docker run -d --name elasticsearch \
  -e "discovery.type=single-node" \
  -e "bootstrap.memory_lock=true" \
  -e "ES_JAVA_OPTS=-Xms2g -Xmx2g" \
  -e "xpack.security.enabled=true" \
  -p 9200:9200 \
  -v /elkdata/data:/usr/share/elasticsearch/data \
  -v /elkdata/config:/usr/share/elasticsearch/config \
  --ulimit memlock=-1:-1 \
  --user "1000:1000" \
  --net elknet \
  elasticsearch:8.17.4

2.2访问Elasticsearch

访问elasticsearch服务器 http://192.168.110.83:9200/ (注意:由于启动时设置了 xpack.security.enabled=true,访问时会要求 elastic 用户的密码;可在容器内执行 bin/elasticsearch-reset-password -u elastic 重置密码后再访问)

三、创建logstash容器

3.1 创建配置文件

bash 复制代码
# -p creates missing parent directories; mkdir has no -R option
# ('mkdir -R' fails with "invalid option" — -R is a cp/chmod/chown flag).
mkdir -p /elkdata/logconfig/
cd /elkdata/logconfig/
vim logstash.conf
bash 复制代码
input {
  beats {
    port => 5044
    ssl => false
    tags => ["beats_input"]
  }
}

filter {
  # Optional de-duplication: compute a stable per-event hash and use it as
  # the document id in the outputs below (uncomment both sides together).
  # fingerprint {
  #   source => ["@timestamp", "host", "message"]
  #   method => "MURMUR3"                    # fast non-cryptographic hash
  #   target => "[@metadata][fingerprint]"
  #   key => "${DEDUPLICATION_KEY}"          # inject the key via environment variable
  # }

  grok {
    match => { 
      "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:loglevel} %{GREEDYDATA:message}" 
    }
    # Without overwrite, grok APPENDS the capture, turning "message" into a
    # two-element array [original, captured]; overwrite keeps it one string.
    overwrite => ["message"]
    tag_on_failure => ["_grokparsefailure"]
  }

  date {
    match => ["timestamp", "ISO8601"]
    timezone => "Asia/Shanghai"  # normalize all events to a single timezone
  }
}

# Route events to a per-application daily index based on the custom
# [fields][log_type] tag added by Filebeat.
# NOTE(review): if xpack security is enabled on Elasticsearch (section 2.1),
# these outputs also need user/password (and https) settings — confirm which
# security mode is actually in use.
output {
  if [fields][log_type] == "appkf" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appkf-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"  # pair with fingerprint filter above
      action => "create"
      # retry_on_conflict applies only to "update" actions, so it is omitted here.
    }
  }
  else if [fields][log_type] == "appcs" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appcs-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
    }
  }
  else if [fields][log_type] == "appsc" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appsc-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
    }
  }
  else {
    # Catch-all for events without a recognized [fields][log_type].
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "unknown-%{+YYYY.MM.dd}"
    }
  }

  # Debug output: dump events whose message did not match the grok pattern.
  if "_grokparsefailure" in [tags] {
    stdout { codec => rubydebug }
  }
}

3.2.运行Logstash容器

bash 复制代码
# Create the docker network (skip if it already exists).
docker network create elknet
bash 复制代码
# NOTE(review): logstash.conf points at http://elasticsearch:9200, so the
# elasticsearch container must also be attached to the elknet network
# (docker network connect elknet elasticsearch) — the docker run in
# section 2.1 did not attach it, and name resolution will fail otherwise.
docker run -d   --name logstash   -p 5044:5044   -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf   --net elknet  logstash:8.17.4

3.3.验证logstash是否正常启动

bash 复制代码
 docker logs -f logstash

四、创建Kibana容器

4.1.运行Kibana容器

bash 复制代码
# NOTE(review): --link is a deprecated docker feature and only works when both
# containers sit on the default bridge network. Since logstash was started on
# the user-defined "elknet" network, consider "--net elknet" here instead of
# --link so all three containers share one network.
docker run -d   --name kibana   -p 5601:5601   -e "ELASTICSEARCH_HOSTS=http://elasticsearch:9200"   --link elasticsearch:elasticsearch   kibana:8.17.4

4.2.验证kibana是否正常启动

bash 复制代码
docker logs -f kibana

4.3 访问kibana

http://192.168.110.83:5601/

五、配置kibana

5.1 制造测试数据

test.py

python 复制代码
from elasticsearch import Elasticsearch
from datetime import datetime

# Connect to the Elasticsearch instance exposed by the docker container.
client = Elasticsearch(["http://192.168.110.83:9200"])

# Compose a single test log document.
doc = {
    "timestamp": datetime.now(),
    "level": "ERROR",
    "message": "This is a test log message from Python",
}

# Index it into "test-logs" and show the cluster's acknowledgement.
result = client.index(index="test-logs", document=doc)
print(result)

执行以下命令:

bash 复制代码
pip install elasticsearch
python test.py

5.2.配置日志视图

点击discover

新建如下视图:

六、使用Filebeat作为日志采集器

6.1在所有目标服务器安装Filebeat

bash 复制代码
# Import Elastic's signing key so package signatures can be verified.
sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

# Register the Elastic 8.x yum repository. The quoted 'EOF' delimiter keeps
# the here-document literal (no shell expansion happens inside it).
sudo tee /etc/yum.repos.d/elastic.repo <<'EOF'
[elasticsearch-8.x]
name=Elasticsearch repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
bash 复制代码
# Refresh repository metadata, then install Filebeat.
# (Deliberately avoids 'yum update -y', which would upgrade EVERY installed
# package on the host instead of just fetching the new repo's metadata.)
sudo yum makecache
sudo yum install filebeat -y

6.2 配置Filebeat

在/etc/filebeat/下新建一个yml文件

yml 复制代码
# Filebeat input: ship application logs to Logstash, tagged so the Logstash
# pipeline can route them to the per-application index (see logstash.conf).
filebeat.inputs:
  # NOTE(review): the "log" input type is deprecated in Filebeat 8.x in favor
  # of "filestream" — confirm before relying on it long-term.
  - type: log
    enabled: true
    paths:
      - /www/wwwroot/zyj_sdev/logs/[0-9][0-9][0-9][0-9]-*/*.log  # glob over dated (YYYY-*) log dirs
    close_inactive: 2h       # close file handles after 2h idle (covers log rotation)
    ignore_older: 72h        # skip files older than 3 days
    scan_frequency: 30s      # balance discovery latency against CPU
    clean_inactive: 168h     # drop registry entries after 7 days (must exceed ignore_older)
    fields:
      log_type: "appkf"  # custom tag, read as [fields][log_type] by Logstash
output.logstash:
  hosts: ["192.168.110.83:5044"]  # Logstash beats input

修改/usr/lib/systemd/system/filebeat.service

bash 复制代码
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
# Stock default kept for reference; this unit overrides the config path below.
#Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
# Point Filebeat at the custom config created in /etc/filebeat (section 6.2).
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/zyjkf.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target

将环境指向新的yml文件

6.3启动Filebeat

bash 复制代码
systemctl enable filebeat
systemctl start filebeat

七、docker compose部署elk

yml 复制代码
# docker compose equivalent of the manual "docker run" commands above.
# NOTE(review): the top-level "version" key is obsolete in the Compose
# Specification — newer docker compose ignores it and prints a warning.
version: '3.8'

services:
  elasticsearch:
    image: elasticsearch:8.17.4
    container_name: elasticsearch
    user: "1000:1000"  
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      # NOTE(review): security is DISABLED here, while the standalone
      # docker run in section 2.1 enables it — pick one mode consistently.
      - xpack.security.enabled=false
      - ES_JAVA_OPTS=-Xms2g -Xmx2g
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elkdata/data:/usr/share/elasticsearch/data
      - /elkdata/config:/usr/share/elasticsearch/config
    ports:
      - "9200:9200"
    networks:
      - elk

  logstash:
    image: logstash:8.17.4
    container_name: logstash
    environment:
      # Hot-reload the pipeline file when it changes on disk.
      # NOTE(review): confirm the image's env-to-settings mapping accepts the
      # dotted lowercase form; the documented form is CONFIG_RELOAD_AUTOMATIC.
      - config.reload.automatic=true
      - config.reload.interval=3s
    volumes:
      - /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "9600:9600"  # monitoring API
      - "5044:5044"  # beats input
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    image: kibana:8.17.4
    container_name: kibana
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      # Static key so saved-object encryption survives container restarts.
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=1c587411189db344f41a0cd98bc4e778
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge
bash 复制代码
docker compose up -d
相关推荐
GalaxyPokemon1 小时前
LINUX基础 [二] - Linux常见指令
linux·运维·服务器
问道飞鱼2 小时前
【linux知识】web服务环境搭建(一):用户以及开发环境初始化
linux·运维·服务器
CAE虚拟与现实2 小时前
WSL2安装多个版本的Ubuntu
linux·运维·ubuntu·wsl·wsl2
__Smile°2 小时前
修改 docker 工作目录
运维·docker·容器
_丿丨丨_3 小时前
linux下的目录文件管理和基本文件管理的基本操作
linux·运维·服务器
JovaZou4 小时前
n8n 本地部署及实践应用,实现零成本自动化运营 Telegram 频道(保证好使)
运维·人工智能·docker·ai·自然语言处理·自动化·llama
邹卓为4 小时前
Jenkins 发送钉钉消息
运维·jenkins·钉钉
秃头的赌徒4 小时前
Docker 前瞻
linux·运维·服务器
DC_BLOG5 小时前
IS-IS中特殊字段——OL过载
运维·华为·智能路由器
随行就市5 小时前
python爬虫
运维·服务器·爬虫