ELK

ELK integration requires no code intrusion; its main cost is server memory. You only need to deploy and start the three services, then point the project's logging configuration at the Logstash server.

1. Developers no longer need to log in to servers to track down exceptions. Server access usually requires a VPN, and not every developer is granted one.
2. For multi-module microservice projects, a precisely configured log output format makes locating problems much easier: you no longer have to figure out which server hosts which service module before you can start debugging.
1. Environment Setup
- 1.1 Download Logstash

```shell
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.1.0.tar.gz
```
- 1.2 Download Kibana

```shell
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.1.0-linux-x86_64.tar.gz
```
- 1.3 Download Elasticsearch

```shell
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.1.0-linux-x86_64.tar.gz
```
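
After downloading, unpack the three archives. A minimal sketch, assuming everything lives under /usr/local (the path is an assumption; adjust to your layout):

```shell
cd /usr/local
tar -zxvf logstash-7.1.0.tar.gz
tar -zxvf kibana-7.1.0-linux-x86_64.tar.gz
tar -zxvf elasticsearch-7.1.0-linux-x86_64.tar.gz
```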
- 1.4 Configure elasticsearch.yml

Edit elasticsearch.yml:

```yml
#network.host: 192.168.0.1
network.host: 0.0.0.0
#
# Set a custom port for HTTP:
#
#http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
discovery.seed_hosts: ["127.0.0.1"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
cluster.initial_master_nodes: ["node-1"]
```
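
With the config in place, Elasticsearch can be started from the extracted directory. A minimal sketch; the `es` user name and install path are assumptions, and note that Elasticsearch refuses to start as root:

```shell
# Elasticsearch will not run as root; create a dedicated user first
useradd es
chown -R es:es /usr/local/elasticsearch-7.1.0
# -d starts the node as a background daemon
su - es -c "/usr/local/elasticsearch-7.1.0/bin/elasticsearch -d"
# Verify the node is up
curl http://127.0.0.1:9200
```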
- 1.5 Configure logstash.conf

Copy logstash-sample.conf to a new file named logstash.conf with the following content:

```conf
# Logstash configuration for a simple
# TCP -> Logstash -> Elasticsearch pipeline.
input {
  tcp {
    port => 4560
    codec => "json"
  }
}

output {
  elasticsearch {
    hosts => ["127.0.0.1:9200"]
    index => "logstash-%{+YYYY.MM.dd}"
    #user => "elastic"
    #password => "changeme"
  }
  stdout { codec => rubydebug }
}
```
Then edit logstash.yml:

```yml
# Adjust for your own deployment; do not expose this address directly to the public internet
http.host: "0.0.0.0"
```
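
Logstash can then be started with the pipeline file created above. A minimal sketch; the install path is an assumption:

```shell
cd /usr/local/logstash-7.1.0
# -f points Logstash at the pipeline config; nohup keeps it running after logout
nohup ./bin/logstash -f config/logstash.conf > /dev/null 2>&1 &
```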
- 1.6 Edit kibana.yml

```yml
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "0.0.0.0"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""
...
# Specifies locale to be used for all localizable strings, dates and number formats.
#i18n.locale: "en"
i18n.locale: "zh-CN"
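
Finally, start Kibana. A minimal sketch; the install path is an assumption:

```shell
cd /usr/local/kibana-7.1.0-linux-x86_64
# Run as a non-root user (Kibana refuses root unless --allow-root is passed)
nohup ./bin/kibana > /dev/null 2>&1 &
# Kibana now listens on port 5601 as configured above: http://<server-ip>:5601
```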
2. Spring Boot Integration
- 2.1 Add the Maven dependency

```xml
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.3</version>
</dependency>
```
- 2.2 Configure logback.xml

```xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Resolve spring.application.name for the logstash appender below. Note: Spring Boot
         only processes <springProperty> when it loads this file, typically named logback-spring.xml -->
    <springProperty scope="context" name="springApplicationName" source="spring.application.name" defaultValue="unknown"/>
    <!-- Project name -->
    <property name="project.name" value="fyupeng-blog-api-02" />
    <!-- Log file output path -->
    <property name="log.path" value="/usr/local/jarList/blog/api/logs/" />
    <!-- Days of log history to keep -->
    <property name="maxHistory" value="60"/>
    <!-- Maximum size per output file -->
    <property name="maxFileSize" value="10MB"/>
    <!-- Log output pattern -->
    <property name="encoder.pattern" value="%d{yyyy/MM/dd HH:mm:ss.SSS} %-5level [%thread] [%c{0}:%L] : %msg%n"/>

    <appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- The address of the Logstash TCP input configured earlier. Logback pushes logs to this
             endpoint; Logstash formats them and forwards them to Elasticsearch, and Kibana then
             pulls the data from ES to display it in the web UI -->
        <destination>192.168.10.100:4560</destination>
        <encoder class="net.logstash.logback.encoder.LogstashEncoder">
            <customFields>{"appname": "${springApplicationName}"}</customFields>
        </encoder>
    </appender>
    <!-- Console output -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${encoder.pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- Colored console output -->
    <!-- Include Spring Boot's default logback settings, which define CONSOLE_LOG_PATTERN -->
    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
    <appender name="COLOR_CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <!-- Use Spring Boot's default colored console log pattern from the include above -->
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <!-- File output -->
    <!-- Time-rolling appender for logs of all levels -->
    <appender name="FILE_All" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${log.path}/${project.name}/log_all.log</file>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/${project.name}/all/log-all-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder.pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <!-- File output -->
    <!-- Time-rolling appender for INFO logs -->
    <appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${log.path}/${project.name}/log_info.log</file>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Daily archive path and naming pattern -->
            <fileNamePattern>${log.path}/${project.name}/info/log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Days of log files to keep -->
            <maxHistory>${maxHistory}</maxHistory>
        </rollingPolicy>
        <!-- Log file output pattern -->
        <encoder>
            <pattern>${encoder.pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <!-- This file records INFO level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>
    <!-- File output -->
    <!-- Time-rolling appender for DEBUG logs -->
    <appender name="FILE_DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${log.path}/${project.name}/log_debug.log</file>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Daily archive path and naming pattern -->
            <fileNamePattern>${log.path}/${project.name}/debug/log-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Days of log files to keep -->
            <maxHistory>${maxHistory}</maxHistory>
        </rollingPolicy>
        <!-- Log file output pattern -->
        <encoder>
            <pattern>${encoder.pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <!-- This file records DEBUG level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>DEBUG</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>
    <!-- File output -->
    <!-- Time-rolling appender for ERROR logs -->
    <appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${log.path}/${project.name}/log_error.log</file>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Daily archive path and naming pattern -->
            <fileNamePattern>${log.path}/${project.name}/error/log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Days of log files to keep -->
            <maxHistory>${maxHistory}</maxHistory>
        </rollingPolicy>
        <!-- Log file output pattern -->
        <encoder>
            <pattern>${encoder.pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <!-- This file records ERROR level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>
<appender name="DRUID_MONITOR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/${project.name}/monitor/druid-monitor-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<maxHistory>${maxHistory}</maxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${maxFileSize}</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<pattern>${encoder.pattern}</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
    <!-- Monitoring log output for the scheduled task DruidLogTask -->
    <logger name="cn.fyupeng.task.DruidLogTask" level="DEBUG" additivity="false">
        <appender-ref ref="DRUID_MONITOR" />
    </logger>

    <!-- A dedicated ERROR-level logger that ships errors to the logstash appender -->
    <logger name="cn.fyupeng" level="ERROR">
        <appender-ref ref="logstash"/>
        <appender-ref ref="FILE_ERROR"/> <!-- also write errors to FILE_ERROR if needed -->
    </logger>

    <root level="info">
        <appender-ref ref="COLOR_CONSOLE"/>
        <appender-ref ref="FILE_INFO"/>
    </root>
</configuration>
```
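
Once the application is running and producing logs, you can verify end-to-end delivery. A sketch assuming the hosts and ports configured above:

```shell
# Logstash should have created a dated index in Elasticsearch
curl "http://127.0.0.1:9200/_cat/indices?v" | grep logstash
```

If the index exists, create an index pattern for `logstash-*` in Kibana (Management -> Index Patterns) and the application logs will show up in Discover.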