Integrating JVM Monitoring with Prometheus

1. Create the following two configuration classes:
```java
package com.haoze.doctor.config;
import com.alibaba.druid.pool.DruidDataSource;
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.function.ToDoubleFunction;
@Component
public class DruidCollector {
    @Value("${spring.application.name}")
    private String applicationName;
    private static final String LABEL_NAME = "druid_pool";
    private final MeterRegistry registry;
    DruidCollector(MeterRegistry registry) {
        this.registry = registry;
    }
    void register(List<DruidDataSource> dataSources) {
        dataSources.forEach((druidDataSource) -> {
            // NOTE: each lambda reads from its own "datasource" parameter rather than
            // the captured druidDataSource, so the only reference the gauge keeps to
            // the pool is Micrometer's internal weak one.
            // basic configurations
            createGauge(druidDataSource, "druid_initial_size", "Initial size", datasource -> (double) datasource.getInitialSize());
            createGauge(druidDataSource, "druid_min_idle", "Min idle", datasource -> (double) datasource.getMinIdle());
            createGauge(druidDataSource, "druid_max_active", "Max active", datasource -> (double) datasource.getMaxActive());
            // connection pool core metrics
            createGauge(druidDataSource, "druid_active_count", "Active count", datasource -> (double) datasource.getActiveCount());
            createGauge(druidDataSource, "druid_active_peak", "Active peak", datasource -> (double) datasource.getActivePeak());
            createGauge(druidDataSource, "druid_pooling_peak", "Pooling peak", datasource -> (double) datasource.getPoolingPeak());
            createGauge(druidDataSource, "druid_pooling_count", "Pooling count", datasource -> (double) datasource.getPoolingCount());
            createGauge(druidDataSource, "druid_wait_thread_count", "Wait thread count", datasource -> (double) datasource.getWaitThreadCount());
            // connection pool detail metrics
            createGauge(druidDataSource, "druid_not_empty_wait_count", "Not empty wait count", datasource -> (double) datasource.getNotEmptyWaitCount());
            createGauge(druidDataSource, "druid_not_empty_wait_millis", "Not empty wait millis", datasource -> (double) datasource.getNotEmptyWaitMillis());
            createGauge(druidDataSource, "druid_not_empty_thread_count", "Not empty thread count", datasource -> (double) datasource.getNotEmptyWaitThreadCount());
            createGauge(druidDataSource, "druid_logic_connect_count", "Logic connect count", datasource -> (double) datasource.getConnectCount());
            createGauge(druidDataSource, "druid_logic_close_count", "Logic close count", datasource -> (double) datasource.getCloseCount());
            createGauge(druidDataSource, "druid_logic_connect_error_count", "Logic connect error count", datasource -> (double) datasource.getConnectErrorCount());
            createGauge(druidDataSource, "druid_physical_connect_count", "Physical connect count", datasource -> (double) datasource.getCreateCount());
            createGauge(druidDataSource, "druid_physical_close_count", "Physical close count", datasource -> (double) datasource.getDestroyCount());
            createGauge(druidDataSource, "druid_physical_connect_error_count", "Physical connect error count", datasource -> (double) datasource.getCreateErrorCount());
            // sql execution core metrics
            createGauge(druidDataSource, "druid_error_count", "Error count", datasource -> (double) datasource.getErrorCount());
            createGauge(druidDataSource, "druid_execute_count", "Execute count", datasource -> (double) datasource.getExecuteCount());
            // transaction metrics
            createGauge(druidDataSource, "druid_start_transaction_count", "Start transaction count", datasource -> (double) datasource.getStartTransactionCount());
            createGauge(druidDataSource, "druid_commit_count", "Commit count", datasource -> (double) datasource.getCommitCount());
            createGauge(druidDataSource, "druid_rollback_count", "Rollback count", datasource -> (double) datasource.getRollbackCount());
            // sql execution detail
            createGauge(druidDataSource, "druid_prepared_statement_open_count", "Prepared statement open count", datasource -> (double) datasource.getPreparedStatementCount());
            createGauge(druidDataSource, "druid_prepared_statement_closed_count", "Prepared statement closed count", datasource -> (double) datasource.getClosedPreparedStatementCount());
            createGauge(druidDataSource, "druid_ps_cache_access_count", "PS cache access count", datasource -> (double) datasource.getCachedPreparedStatementAccessCount());
            createGauge(druidDataSource, "druid_ps_cache_hit_count", "PS cache hit count", datasource -> (double) datasource.getCachedPreparedStatementHitCount());
            createGauge(druidDataSource, "druid_ps_cache_miss_count", "PS cache miss count", datasource -> (double) datasource.getCachedPreparedStatementMissCount());
            createGauge(druidDataSource, "druid_execute_query_count", "Execute query count", datasource -> (double) datasource.getExecuteQueryCount());
            createGauge(druidDataSource, "druid_execute_update_count", "Execute update count", datasource -> (double) datasource.getExecuteUpdateCount());
            createGauge(druidDataSource, "druid_execute_batch_count", "Execute batch count", datasource -> (double) datasource.getExecuteBatchCount());
            // non-core metrics, mostly static configuration values
            createGauge(druidDataSource, "druid_max_wait", "Max wait", datasource -> (double) datasource.getMaxWait());
            createGauge(druidDataSource, "druid_max_wait_thread_count", "Max wait thread count", datasource -> (double) datasource.getMaxWaitThreadCount());
            createGauge(druidDataSource, "druid_login_timeout", "Login timeout", datasource -> (double) datasource.getLoginTimeout());
            createGauge(druidDataSource, "druid_query_timeout", "Query timeout", datasource -> (double) datasource.getQueryTimeout());
            createGauge(druidDataSource, "druid_transaction_query_timeout", "Transaction query timeout", datasource -> (double) datasource.getTransactionQueryTimeout());
        });
    }
    private void createGauge(DruidDataSource weakRef, String metric, String help, ToDoubleFunction<DruidDataSource> measure) {
        Gauge.builder(metric, weakRef, measure)
                .description(help)
                .tag(LABEL_NAME, weakRef.getUsername() + "-" + weakRef.getUrl())
                .tag("application", applicationName)
                .register(this.registry);
    }
}
```
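One detail worth calling out: `Gauge.builder(name, obj, fn)` holds `obj` only weakly (hence the `weakRef` parameter name in `createGauge`), so the measuring lambda must read from the object Micrometer passes to it. If it captures the pool from the enclosing scope instead, the lambda itself keeps a strong reference and the weak-reference mechanism is defeated. A minimal, self-contained sketch of the pattern (the registry and pool here are placeholders):

```java
import com.alibaba.druid.pool.DruidDataSource;
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class GaugeWeakRefSketch {
    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();
        DruidDataSource pool = new DruidDataSource();
        // Correct: read from the argument "p"; Micrometer's weak reference to
        // "pool" is then the only reference the gauge adds.
        Gauge.builder("druid_active_count", pool, p -> (double) p.getActiveCount())
                .description("Active count")
                .register(registry);
        System.out.println(registry.get("druid_active_count").gauge().value());
    }
}
```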
```java
package com.haoze.doctor.config;
import com.alibaba.druid.pool.DruidDataSource;
import io.micrometer.core.instrument.MeterRegistry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@Configuration
@ConditionalOnClass({DruidDataSource.class, MeterRegistry.class})
@Slf4j
public class MetricsConfiguration {
    @Value("${spring.application.name}")
    private String applicationName;
    private final DruidCollector druidCollector;
    public MetricsConfiguration(DruidCollector druidCollector) {
        this.druidCollector = druidCollector;
    }
    @Bean
    MeterRegistryCustomizer<MeterRegistry> configurer() {
        return (registry) -> registry.config().commonTags("application", applicationName);
    }
    @Autowired
    public void bindMetricsRegistryToDruidDataSources(Collection<DataSource> dataSources) throws SQLException {
        List<DruidDataSource> druidDataSources = new ArrayList<>(dataSources.size());
        for (DataSource dataSource : dataSources) {
            // unwrap() throws SQLException for non-Druid pools, so test with isWrapperFor() first
            if (dataSource.isWrapperFor(DruidDataSource.class)) {
                druidDataSources.add(dataSource.unwrap(DruidDataSource.class));
            }
        }
        druidCollector.register(druidDataSources);
        log.info("Finished registering Druid metrics with Micrometer");
    }
}
```
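Both classes inject `spring.application.name` via `@Value`, so the property must be set or the application context will fail to start. A minimal `application.yml` entry (the name itself is just an example):

```yaml
spring:
  application:
    name: doctor-service   # also becomes the "application" tag on every metric
```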
2. The Druid version in your pom needs to be at least the one below:
```xml
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <version>1.1.14</version>
</dependency>
```

Other Maven dependencies to add:

```xml
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<dependency>
    <groupId>io.micrometer</groupId>
    <artifactId>micrometer-registry-prometheus</artifactId>
</dependency>
<!-- micrometer-jvm-extras: process-level JVM metrics, for display in Grafana -->
<dependency>
    <groupId>io.github.mweirauch</groupId>
    <artifactId>micrometer-jvm-extras</artifactId>
    <version>0.2.2</version>
</dependency>
```
3. The Actuator configuration must expose the prometheus endpoint:
```yaml
management:
  endpoints:
    web:
      exposure:
        include: "httptrace,health,shutdown,prometheus"
  endpoint:
    shutdown:
      enabled: true
    health:
      show-details: ALWAYS
```

Exposing `shutdown` lets anyone with HTTP access stop the service, so drop it from `include` in production unless the endpoint is secured.
4. Local verification:
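Start the service and request the scrape endpoint, e.g. http://localhost:8203/actuator/prometheus (use whatever port your service listens on; 8203 matches the target used below). Alongside the standard JVM meters you should see the Druid gauges registered above, in roughly this shape (the label values here are illustrative):

```
# HELP druid_active_count Active count
# TYPE druid_active_count gauge
druid_active_count{application="doctor-service",druid_pool="root-jdbc:mysql://localhost:3306/demo",} 0.0
```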
5. For an ordinary (non-containerized) deployment, that is all the application-side work: point Prometheus at the service's IP and port. Configure a job like the following in prometheus.yml and restart the Prometheus service.
```yaml
  # New JVM monitoring job
  - job_name: "jvm"
    # How often to scrape
    scrape_interval: 5s
    # Per-scrape timeout
    scrape_timeout: 5s
    # Path to scrape metrics from
    metrics_path: '/actuator/prometheus'
    # Address of the application service
    static_configs:
      - targets: ['169.169.169.98:8203']
```
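Once Prometheus is scraping the target, the pool can be graphed immediately; for instance, a rough pool-utilization expression for a Grafana panel (the `application` label value depends on your `spring.application.name`):

```
druid_active_count{application="doctor-service"}
  / druid_max_active{application="doctor-service"}
```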
6. For containerized deployments, however, where instance IPs change, you need Prometheus service discovery instead; here we use Nacos. Enable the following in Nacos's application.properties and restart the Nacos service:
```properties
## Enable Nacos's Prometheus metrics/discovery endpoint
nacos.prometheus.metrics.enabled=true
```

You can then check the registered targets at http://ip:8848/nacos/prometheus/.
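The endpoint serves the standard Prometheus HTTP SD payload, i.e. a JSON array of target groups. The exact labels vary by Nacos version, but the shape is roughly this (target address illustrative):

```json
[
  {
    "targets": ["169.169.169.98:8203"],
    "labels": {}
  }
]
```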

7. Configure service discovery in Prometheus. Add the following to prometheus.yml:
```yaml
  - job_name: 'prod-jvm'
    metrics_path: /actuator/prometheus
    scheme: http
    http_sd_configs:
      ## namespaceId/prod restricts discovery to the "prod" namespace.
      ## If you don't use a namespace, drop that part and point directly
      ## at http://ip:8848/nacos/prometheus/
      - url: http://ip:8848/nacos/prometheus/namespaceId/prod
```
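After reloading Prometheus, the discovered instances should appear under Status → Targets in the prod-jvm job; a quick sanity check in the expression browser:

```
up{job="prod-jvm"}
```

Each healthy target reports 1.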