Deploying ELK with Docker

1. Prepare the Images
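The steps below use the 8.17.4 tags of the official images throughout; pulling them up front avoids a slow first `docker run`:

```bash
docker pull elasticsearch:8.17.4
docker pull logstash:8.17.4
docker pull kibana:8.17.4
```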

2. Create the Elasticsearch Container

2.1 Start the Elasticsearch Container

Create a dedicated Docker network first so that Logstash and Kibana can later reach this container under the hostname `elasticsearch`. Security is disabled here, matching the Compose file in section 7, so the whole stack can talk plain HTTP without credentials; do not expose such a setup beyond a test environment.

```bash
# Shared network so the other containers can resolve "elasticsearch" by name
docker network create elknet

docker run -d --name elasticsearch \
  -e "discovery.type=single-node" \
  -e "bootstrap.memory_lock=true" \
  -e "ES_JAVA_OPTS=-Xms2g -Xmx2g" \
  -e "xpack.security.enabled=false" \
  -p 9200:9200 \
  -v /elkdata/data:/usr/share/elasticsearch/data \
  -v /elkdata/config:/usr/share/elasticsearch/config \
  --ulimit memlock=-1:-1 \
  --user "1000:1000" \
  --net elknet \
  elasticsearch:8.17.4
```
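If the container exits immediately, the mounted host directories are the usual culprit: the process runs as `1000:1000`, and `/elkdata/config` shadows the image's config directory, so it must already contain the default config files. A minimal preparation sketch, run once before the `docker run` above (the throwaway-container copy is just one way to seed the config):

```bash
mkdir -p /elkdata/data /elkdata/config

# Seed the host config directory with the image's default files
docker run --rm -v /elkdata/config:/mnt elasticsearch:8.17.4 \
  sh -c 'cp -a /usr/share/elasticsearch/config/. /mnt/'

chown -R 1000:1000 /elkdata
```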

2.2 Access Elasticsearch

Open http://192.168.110.83:9200/ in a browser (substitute your own host's IP). A JSON document describing the node should come back.
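The same check from a shell; with security disabled no credentials are needed:

```bash
curl http://192.168.110.83:9200/
# A healthy node answers with a JSON document containing "cluster_name"
# and "version" : { "number" : "8.17.4", ... }
```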

3. Create the Logstash Container

3.1 Create the Configuration File

```bash
mkdir -p /elkdata/logconfig/
cd /elkdata/logconfig/
vim logstash.conf
```
```conf
input {
  beats {
    port => 5044
    ssl => false
    tags => ["beats_input"]
  }
}

filter {
 # fingerprint {
 #   source => ["@timestamp", "host", "message"]
 #   method => "MURMUR3"            # fast non-cryptographic hash
 #   target => "[@metadata][fingerprint]"
 #   key => "${DEDUPLICATION_KEY}"  # inject the deduplication key via an environment variable
 # }

  grok {
    match => {
      "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:loglevel} %{GREEDYDATA:message}"
    }
    overwrite => ["message"]   # replace the raw line instead of turning "message" into an array
    tag_on_failure => ["_grokparsefailure"]
  }

  date {
    match => ["timestamp", "ISO8601"]
    timezone => "Asia/Shanghai"  # force a consistent timezone
  }
}

output {
  if [fields][log_type] == "appkf" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appkf-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      retry_on_conflict => 5
      # ssl_certificate_verification => false
      # Note: the dead letter queue is enabled in logstash.yml
      # (dead_letter_queue.enable, path.dead_letter_queue), not in this plugin.
    }
  }
  else if [fields][log_type] == "appcs" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appcs-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else if [fields][log_type] == "appsc" {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "appsc-%{+YYYY.MM.dd}"
      # document_id => "%{[@metadata][fingerprint]}"
      action => "create"
      # ssl_certificate_verification => false
    }
  }
  else {
    elasticsearch {
      hosts => ["http://elasticsearch:9200"]
      index => "unknown-%{+YYYY.MM.dd}"
      # ssl_certificate_verification => false
    }
  }

  # Debug output during development
  if "_grokparsefailure" in [tags] {
    stdout { codec => rubydebug }
  }
}
```
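Before wiring the file into a container, the pipeline syntax can be validated with the Logstash image itself; a sketch using the same bind mount as in 3.2:

```bash
docker run --rm \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  logstash:8.17.4 \
  logstash -f /usr/share/logstash/pipeline/logstash.conf --config.test_and_exit
```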

3.2 Run the Logstash Container

```bash
# Create the network (skip if it already exists from section 2.1)
docker network create elknet
```

```bash
docker run -d --name logstash \
  -p 5044:5044 \
  -v /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
  --net elknet \
  logstash:8.17.4
```

3.3 Verify That Logstash Started

```bash
docker logs -f logstash
```
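Once the logs show the pipeline running, the Beats input should be listening on the published port; a quick check on the Docker host:

```bash
ss -ltn | grep 5044
```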

4. Create the Kibana Container

4.1 Run the Kibana Container

Kibana joins the same `elknet` network so that the hostname `elasticsearch` resolves (the legacy `--link` flag does not work across user-defined networks):

```bash
docker run -d --name kibana \
  -p 5601:5601 \
  -e "ELASTICSEARCH_HOSTS=http://elasticsearch:9200" \
  --net elknet \
  kibana:8.17.4
```

4.2 Verify That Kibana Started

```bash
docker logs -f kibana
```

4.3 Access Kibana

http://192.168.110.83:5601/
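Kibana can take a minute to become ready; its status endpoint offers a shell-side check:

```bash
curl -s http://192.168.110.83:5601/api/status | head -c 300
```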

5. Configure Kibana

5.1 Generate Test Data

test.py:

```python
from elasticsearch import Elasticsearch
from datetime import datetime

# Connect to Elasticsearch
es = Elasticsearch(
    ["http://192.168.110.83:9200"]
)

# Define the log document
log_data = {
    "timestamp": datetime.now(),
    "level": "ERROR",
    "message": "This is a test log message from Python"
}

# Index the document into Elasticsearch
response = es.index(index="test-logs", document=log_data)
print(response)
```

Run the following commands:

```bash
pip install elasticsearch
python test.py
```
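Whether the document actually landed can be verified straight against the index:

```bash
curl 'http://192.168.110.83:9200/test-logs/_search?pretty&size=1'
```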

5.2 Configure a Log View

In Kibana, click Discover, then create a data view for the new index (e.g. an index pattern of `test-logs*`).

6. Use Filebeat as the Log Collector

6.1 Install Filebeat on All Target Servers

```bash
# Import the GPG key
sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

# Create the repository definition
sudo tee /etc/yum.repos.d/elastic.repo <<EOF
[elasticsearch-8.x]
name=Elasticsearch repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
```

```bash
sudo yum update -y
sudo yum install filebeat -y
```

6.2 Configure Filebeat

Create a new YAML file under /etc/filebeat/ (named zyjkf.yml here, matching the systemd unit below):

```yaml
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /www/wwwroot/zyj_sdev/logs/[0-9][0-9][0-9][0-9]-*/*.log  # log file path pattern
    close_inactive: 2h       # longer than the log rotation interval
    ignore_older: 72h        # ignore files older than 3 days
    scan_frequency: 30s      # balance scan latency against performance
    clean_inactive: 168h     # drop registry entries after 7 days
    fields:
      log_type: "appkf"  # custom field; the Logstash output routes on this
output.logstash:
  hosts: ["192.168.110.83:5044"]  # the Logstash server
```

Edit /usr/lib/systemd/system/filebeat.service:

```ini
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
#Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/zyjkf.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
```

The `BEAT_CONFIG_OPTS` line now points Filebeat at the new YAML file instead of the default filebeat.yml.

6.3 Start Filebeat

```bash
systemctl daemon-reload   # pick up the edited unit file
systemctl enable filebeat
systemctl start filebeat
```

7. Deploy ELK with Docker Compose

```yaml
version: '3.8'

services:
  elasticsearch:
    image: elasticsearch:8.17.4
    container_name: elasticsearch
    user: "1000:1000"  
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - xpack.security.enabled=false
      - ES_JAVA_OPTS=-Xms2g -Xmx2g
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elkdata/data:/usr/share/elasticsearch/data
      - /elkdata/config:/usr/share/elasticsearch/config
    ports:
      - "9200:9200"
    networks:
      - elk

  logstash:
    image: logstash:8.17.4
    container_name: logstash
    environment:
      - config.reload.automatic=true
      - config.reload.interval=3s
    volumes:
      - /elkdata/logconfig/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "9600:9600"
      - "5044:5044"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    image: kibana:8.17.4
    container_name: kibana
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=1c587411189db344f41a0cd98bc4e778
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge
```

```bash
docker compose up -d
```
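A quick sanity check after the stack comes up, mirroring the checks from the single-container sections:

```bash
docker compose ps
curl http://192.168.110.83:9200/
```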