Ubuntu 下使用 Prometheus + Grafana 监控多台服务器

复制代码
# Install the Prometheus server and node exporter from Ubuntu's own repos.
sudo apt-get update
sudo apt-get install -y prometheus prometheus-node-exporter

# Add Grafana's APT repository.
# NOTE: `apt-key add` is deprecated (and removed on newer Ubuntu releases);
# store the key in a dedicated keyring and reference it with [signed-by=...].
sudo mkdir -p /etc/apt/keyrings
wget -q -O - https://packages.grafana.com/gpg.key \
  | gpg --dearmor \
  | sudo tee /etc/apt/keyrings/grafana.gpg > /dev/null

# Use plain `tee` (not `tee -a`): appending would duplicate the repo line
# every time these steps are re-run.
echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://packages.grafana.com/oss/deb stable main" \
  | sudo tee /etc/apt/sources.list.d/grafana.list > /dev/null

sudo apt-get update
sudo apt-get install -y grafana

# Start Grafana now and on every boot, then verify it is running.
sudo systemctl enable --now grafana-server
systemctl status grafana-server

自动生成监控目标 IP 列表的脚本

在 /etc/prometheus/ 目录创建脚本 node.sh

复制代码
#!/bin/bash
# Generate a Prometheus file_sd target list covering a contiguous IPv4 range.
# Output is a JSON array of {"targets": [...], "labels": {}} objects that
# prometheus.yml consumes via file_sd_configs; Prometheus re-reads the file
# periodically, so re-running this script is enough to update the target set.

#######################################
# Write the file_sd JSON target list.
# Arguments:
#   $1 - directory to create/use for the output file
#   $2 - output file path
#   $3 - IPv4 prefix, e.g. "10.10.11"
#   $4 - first host octet (inclusive)
#   $5 - last host octet (inclusive)
# Outputs: progress messages on stdout; JSON written to $2
#######################################
generate_targets() {
  local dir=$1 file=$2 base_ip=$3 start=$4 end=$5
  local i

  # Create the output directory if it does not exist.
  mkdir -p -- "$dir"

  # Build the whole document in one redirection instead of many `>>` appends.
  {
    echo '['
    for (( i = start; i <= end; i++ )); do
      if (( i == end )); then
        # Last element gets no trailing comma so the array stays valid JSON.
        printf '  {"targets": ["%s.%s:9100"], "labels": {}}\n' "$base_ip" "$i"
      else
        printf '  {"targets": ["%s.%s:9100"], "labels": {}},\n' "$base_ip" "$i"
      fi
    done
    echo ']'
  } > "$file"

  printf 'Generated targets file: %s\n' "$file"
  printf 'Total targets: %s\n' "$(( end - start + 1 ))"
}

# Defaults — same values and output location as before.
TARGETS_DIR="/etc/prometheus/targets"                 # Prometheus config dir
TARGETS_FILE="$TARGETS_DIR/node_targets.json"         # file_sd target file
BASE_IP="10.10.11"                                    # IP prefix
START=22                                              # first host octet
END=244                                               # last host octet

generate_targets "$TARGETS_DIR" "$TARGETS_FILE" "$BASE_IP" "$START" "$END"

# Edit the Prometheus server configuration.
# (Fixed path typo: the directory is /etc/prometheus, not /etc/promethus —
# matching the paths used by node.sh and the file_sd config below.)
vim /etc/prometheus/prometheus.yml

复制代码
# Sample config for Prometheus (/etc/prometheus/prometheus.yml).

global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
      monitor: 'example'

# Alertmanager configuration (expects an Alertmanager on localhost:9093;
# harmless if none is running, alerts are simply not delivered).
alerting:
  alertmanagers:
  - static_configs:
    - targets: ['localhost:9093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    scrape_timeout: 5s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']

  - job_name: node
    # Scrape all node-exporter instances (port 9100) listed in the JSON file
    # generated by node.sh. Prometheus re-reads the file every
    # refresh_interval, so updating the file is enough — no restart needed.
    file_sd_configs:
      - files:
          - '/etc/prometheus/targets/node_targets.json'  
        refresh_interval: 5m

将 Grafana 界面改为中文

复制代码
# Switch the Grafana UI default language to Simplified Chinese.
# Edit /etc/grafana/grafana.ini, NOT /usr/share/grafana/conf/defaults.ini:
# Grafana's docs say never to modify defaults.ini — it is replaced on every
# package upgrade, silently reverting the change. In grafana.ini the option
# ships commented out with a leading ';', so uncomment and set it.
sudo sed -i 's/^;default_language = en-US$/default_language = zh-Hans/' /etc/grafana/grafana.ini

重启

复制代码
# Restart Prometheus so it loads the updated prometheus.yml.
systemctl restart prometheus

# Restart Grafana so the language setting takes effect.
systemctl restart  grafana-server.service

在浏览器访问 http://<服务器IP>:3000 打开 Grafana 页面

相关推荐
BD_Marathon3 分钟前
【Zookeeper】监听器原理
linux·分布式·zookeeper
稚辉君.MCA_P8_Java24 分钟前
Gemini永久会员 快速排序(Quick Sort) 基于分治思想的高效排序算法
java·linux·数据结构·spring·排序算法
x***440124 分钟前
linux 设置tomcat开机启动
linux·运维·tomcat
正在努力的小河43 分钟前
Linux 块设备驱动实验
linux·运维·服务器
代码游侠2 小时前
学习笔记——数据结构学习
linux·开发语言·数据结构·笔记·学习
j***49562 小时前
Linux(CentOS)安装 Nginx
linux·nginx·centos
xuanzdhc2 小时前
Gitgit
java·linux·运维·服务器·c++·git
laocooon5238578862 小时前
win下制作一个简单的Cmake,完成运行效果
linux·运维·服务器
北顾南栀倾寒2 小时前
[杂学笔记]HTTP与HTTPS的区别、HTTPS进行TLS握手的过程、HTTPS如何防止中间人攻击、HTTP1.1与HTTP2.0的区别、TCP的拥塞控制
linux·服务器
on_pluto_3 小时前
【debug】关于如何让电脑里面的两个cuda共存
linux·服务器·前端