Integrating JVM applications with Prometheus monitoring

  1. Create the following two configuration classes:
```java
package com.haoze.doctor.config;

import com.alibaba.druid.pool.DruidDataSource;
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.function.ToDoubleFunction;

/**
 * Exposes Druid connection pool statistics as Micrometer gauges.
 */
@Component
public class DruidCollector {

    @Value("${spring.application.name}")
    private String applicationName;

    private static final String LABEL_NAME = "druid_pool";

    private final MeterRegistry registry;

    DruidCollector(MeterRegistry registry) {
        this.registry = registry;
    }
    /**
     * Registers one gauge per statistic for every Druid pool. Each lambda reads from
     * its {@code datasource} parameter rather than the captured pool reference, so the
     * weak reference held by the gauge (see createGauge) is not defeated.
     */
    void register(List<DruidDataSource> dataSources) {
        dataSources.forEach(druidDataSource -> {
            // basic configuration
            createGauge(druidDataSource, "druid_initial_size", "Initial size", datasource -> (double) datasource.getInitialSize());
            createGauge(druidDataSource, "druid_min_idle", "Min idle", datasource -> (double) datasource.getMinIdle());
            createGauge(druidDataSource, "druid_max_active", "Max active", datasource -> (double) datasource.getMaxActive());
            // connection pool core metrics
            createGauge(druidDataSource, "druid_active_count", "Active count", datasource -> (double) datasource.getActiveCount());
            createGauge(druidDataSource, "druid_active_peak", "Active peak", datasource -> (double) datasource.getActivePeak());
            createGauge(druidDataSource, "druid_pooling_peak", "Pooling peak", datasource -> (double) datasource.getPoolingPeak());
            createGauge(druidDataSource, "druid_pooling_count", "Pooling count", datasource -> (double) datasource.getPoolingCount());
            createGauge(druidDataSource, "druid_wait_thread_count", "Wait thread count", datasource -> (double) datasource.getWaitThreadCount());
            // connection pool detail metrics
            createGauge(druidDataSource, "druid_not_empty_wait_count", "Not empty wait count", datasource -> (double) datasource.getNotEmptyWaitCount());
            createGauge(druidDataSource, "druid_not_empty_wait_millis", "Not empty wait millis", datasource -> (double) datasource.getNotEmptyWaitMillis());
            createGauge(druidDataSource, "druid_not_empty_thread_count", "Not empty thread count", datasource -> (double) datasource.getNotEmptyWaitThreadCount());
            createGauge(druidDataSource, "druid_logic_connect_count", "Logic connect count", datasource -> (double) datasource.getConnectCount());
            createGauge(druidDataSource, "druid_logic_close_count", "Logic close count", datasource -> (double) datasource.getCloseCount());
            createGauge(druidDataSource, "druid_logic_connect_error_count", "Logic connect error count", datasource -> (double) datasource.getConnectErrorCount());
            createGauge(druidDataSource, "druid_physical_connect_count", "Physical connect count", datasource -> (double) datasource.getCreateCount());
            createGauge(druidDataSource, "druid_physical_close_count", "Physical close count", datasource -> (double) datasource.getDestroyCount());
            createGauge(druidDataSource, "druid_physical_connect_error_count", "Physical connect error count", datasource -> (double) datasource.getCreateErrorCount());
            // SQL execution core metrics
            createGauge(druidDataSource, "druid_error_count", "Error count", datasource -> (double) datasource.getErrorCount());
            createGauge(druidDataSource, "druid_execute_count", "Execute count", datasource -> (double) datasource.getExecuteCount());
            // transaction metrics
            createGauge(druidDataSource, "druid_start_transaction_count", "Start transaction count", datasource -> (double) datasource.getStartTransactionCount());
            createGauge(druidDataSource, "druid_commit_count", "Commit count", datasource -> (double) datasource.getCommitCount());
            createGauge(druidDataSource, "druid_rollback_count", "Rollback count", datasource -> (double) datasource.getRollbackCount());
            // SQL execution detail metrics
            createGauge(druidDataSource, "druid_prepared_statement_open_count", "Prepared statement open count", datasource -> (double) datasource.getPreparedStatementCount());
            createGauge(druidDataSource, "druid_prepared_statement_closed_count", "Prepared statement closed count", datasource -> (double) datasource.getClosedPreparedStatementCount());
            createGauge(druidDataSource, "druid_ps_cache_access_count", "PS cache access count", datasource -> (double) datasource.getCachedPreparedStatementAccessCount());
            createGauge(druidDataSource, "druid_ps_cache_hit_count", "PS cache hit count", datasource -> (double) datasource.getCachedPreparedStatementHitCount());
            createGauge(druidDataSource, "druid_ps_cache_miss_count", "PS cache miss count", datasource -> (double) datasource.getCachedPreparedStatementMissCount());
            createGauge(druidDataSource, "druid_execute_query_count", "Execute query count", datasource -> (double) datasource.getExecuteQueryCount());
            createGauge(druidDataSource, "druid_execute_update_count", "Execute update count", datasource -> (double) datasource.getExecuteUpdateCount());
            createGauge(druidDataSource, "druid_execute_batch_count", "Execute batch count", datasource -> (double) datasource.getExecuteBatchCount());
            // non-core metrics, mostly static configuration
            createGauge(druidDataSource, "druid_max_wait", "Max wait", datasource -> (double) datasource.getMaxWait());
            createGauge(druidDataSource, "druid_max_wait_thread_count", "Max wait thread count", datasource -> (double) datasource.getMaxWaitThreadCount());
            createGauge(druidDataSource, "druid_login_timeout", "Login timeout", datasource -> (double) datasource.getLoginTimeout());
            createGauge(druidDataSource, "druid_query_timeout", "Query timeout", datasource -> (double) datasource.getQueryTimeout());
            createGauge(druidDataSource, "druid_transaction_query_timeout", "Transaction query timeout", datasource -> (double) datasource.getTransactionQueryTimeout());
        });
    }
    private void createGauge(DruidDataSource weakRef, String metric, String help, ToDoubleFunction<DruidDataSource> measure) {
        // Gauge.builder holds only a weak reference to the data source; the measure
        // function receives that instance, so it must not capture the pool itself.
        Gauge.builder(metric, weakRef, measure)
                .description(help)
                .tag(LABEL_NAME, weakRef.getUsername() + "-" + weakRef.getUrl())
                .tag("application", applicationName)
                .register(this.registry);
    }
}
```
And the second class, which collects all Druid data sources in the context and triggers the registration:

```java
package com.haoze.doctor.config;

import com.alibaba.druid.pool.DruidDataSource;
import io.micrometer.core.instrument.MeterRegistry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@Configuration
@ConditionalOnClass({DruidDataSource.class, MeterRegistry.class})
@Slf4j
public class MetricsConfiguration {

    @Value("${spring.application.name}")
    private String applicationName;

    @Autowired
    private DruidCollector druidCollector;

    // Tag every meter with the application name so instances are distinguishable in Prometheus.
    @Bean
    MeterRegistryCustomizer<MeterRegistry> configurer() {
        return (registry) -> registry.config().commonTags("application", applicationName);
    }
    // Spring injects every DataSource bean in the context; keep only the Druid pools.
    @Autowired
    public void bindMetricsRegistryToDruidDataSources(Collection<DataSource> dataSources) throws SQLException {
        List<DruidDataSource> druidDataSources = new ArrayList<>(dataSources.size());
        for (DataSource dataSource : dataSources) {
            // unwrap() throws for non-Druid data sources, so test with isWrapperFor first
            if (dataSource.isWrapperFor(DruidDataSource.class)) {
                druidDataSources.add(dataSource.unwrap(DruidDataSource.class));
            }
        }
        druidCollector.register(druidDataSources);
        log.info("finished registering Druid metrics with Micrometer");
    }
}
```
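Both classes resolve ${spring.application.name}, so that property must be set or the context will fail to start. A minimal application.yml entry (my-app is a placeholder):

```yaml
spring:
  application:
    name: my-app   # becomes the "application" tag on every metric
```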
  2. The Druid version in the pom needs to be at this version or above:
```xml
     <dependency>
         <groupId>com.alibaba</groupId>
         <artifactId>druid</artifactId>
         <version>1.1.14</version>
     </dependency>
```

Other Maven dependencies that need to be added:

```xml
     <dependency>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-starter-actuator</artifactId>
     </dependency>
     <dependency>
         <groupId>io.micrometer</groupId>
         <artifactId>micrometer-registry-prometheus</artifactId>
     </dependency>
     <!-- lets Micrometer collect process-level JVM metrics for display in Grafana -->
     <dependency>
         <groupId>io.github.mweirauch</groupId>
         <artifactId>micrometer-jvm-extras</artifactId>
         <version>0.2.2</version>
     </dependency>
```
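Note that micrometer-jvm-extras only ships the meter binders; they still have to be registered with the registry. A minimal sketch, assuming the binder classes from io.github.mweirauch.micrometer.jvm.extras (Spring Boot binds any MeterBinder bean automatically):

```java
package com.haoze.doctor.config;

import io.github.mweirauch.micrometer.jvm.extras.ProcessMemoryMetrics;
import io.github.mweirauch.micrometer.jvm.extras.ProcessThreadMetrics;
import io.micrometer.core.instrument.binder.MeterBinder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class JvmExtrasConfiguration {

    // process memory metrics (resident, virtual, swap)
    @Bean
    public MeterBinder processMemoryMetrics() {
        return new ProcessMemoryMetrics();
    }

    // process thread metrics
    @Bean
    public MeterBinder processThreadMetrics() {
        return new ProcessThreadMetrics();
    }
}
```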
  3. The actuator configuration must expose the Prometheus endpoint:
```yaml
management:
  endpoints:
    web:
      exposure:
        include: "httptrace,health,shutdown,prometheus"
  endpoint:
    shutdown:
      enabled: true
    health:
      show-details: ALWAYS
```
  4. Local verification: start the application and open http://localhost:<port>/actuator/prometheus; you should see the druid_* gauges defined above along with the standard JVM metrics, as in the sample below.
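A rough sample of what one of the Druid gauges looks like in the scrape output (the application and pool tag values are placeholders and will reflect your own configuration):

```text
# HELP druid_active_count Active count
# TYPE druid_active_count gauge
druid_active_count{application="my-app",druid_pool="root-jdbc:mysql://localhost:3306/demo"} 2.0
```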
  5. For an ordinary deployment, once the steps above are done we can simply point Prometheus at the service's IP and port: add a job like the one below to prometheus.yml and restart the Prometheus service.
```yaml
  # new JVM monitoring job
  - job_name: "jvm"
    # how often to scrape
    scrape_interval: 5s
    # scrape timeout
    scrape_timeout: 5s
    # path to scrape metrics from
    metrics_path: '/actuator/prometheus'
    # address of the application service
    static_configs:
      - targets: ['169.169.169.98:8203']
```
  6. For containerized deployments, however, where IPs change, you need Prometheus service discovery; here we use Nacos. Modify Nacos's application.properties as follows, then restart the Nacos service.
```properties
# set this to true
nacos.prometheus.metrics.enabled=true
```

You can then view the registered content at http://ip:8848/nacos/prometheus/, as sketched below.
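The response should be a JSON array of target groups in the standard Prometheus HTTP SD format, roughly like the following (addresses and labels depend on what is registered and on the Nacos version):

```json
[
  {
    "targets": ["169.169.169.98:8203"],
    "labels": {}
  }
]
```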

  7. Configure service discovery in Prometheus: edit prometheus.yml and add the following.
```yaml
  - job_name: 'prod-jvm'
    metrics_path: /actuator/prometheus
    scheme: http
    http_sd_configs:
      # namespaceId/prod scopes discovery to the prod namespace;
      # if you don't need a namespace, use http://ip:8848/nacos/prometheus/ directly
      - url: http://ip:8848/nacos/prometheus/namespaceId/prod
```
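After reloading Prometheus, the instances registered in Nacos should appear under Status -> Targets, and the jvm_* and druid_* series can then be charted in Grafana, for example with a query such as `jvm_memory_used_bytes{application="my-app"}` (my-app is again a placeholder).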