Ubuntu 下用 Prometheus + Grafana 监控多台服务器

复制代码
# Install Prometheus and the node exporter from the Ubuntu repositories.
sudo apt-get install -y prometheus prometheus-node-exporter

# Add the Grafana OSS APT repository.
# NOTE: apt-key is deprecated (and removed on Ubuntu 22.04+); store the
# repository key in a dedicated keyring and reference it via [signed-by=...].
sudo mkdir -p /etc/apt/keyrings
wget -q -O - https://packages.grafana.com/gpg.key \
  | gpg --dearmor | sudo tee /etc/apt/keyrings/grafana.gpg >/dev/null

# Plain 'tee' (not 'tee -a') so re-running never duplicates the entry.
echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://packages.grafana.com/oss/deb stable main" \
  | sudo tee /etc/apt/sources.list.d/grafana.list

sudo apt-get update
sudo apt-get install -y grafana

# Verify the Grafana service came up.
systemctl status grafana-server

生成监控目标 IP 列表的脚本

在 /etc/prometheus/ 目录创建脚本 node.sh

复制代码
#!/usr/bin/env bash
#
# Generate a Prometheus file_sd target list for node_exporter instances.
#
# Usage: node.sh [targets_dir] [base_ip] [start] [end]
#   targets_dir  output directory       (default: /etc/prometheus/targets)
#   base_ip      first three IP octets  (default: 10.10.11)
#   start        first host octet       (default: 22)
#   end          last host octet        (default: 244)
#
# Output: <targets_dir>/node_targets.json — a JSON array of
# {"targets": ["<ip>:9100"], "labels": {}} entries, consumed by the
# file_sd_configs section of prometheus.yml.

generate_node_targets() {
  local dir="${1:-/etc/prometheus/targets}"
  local base_ip="${2:-10.10.11}"
  local start="${3:-22}"
  local end="${4:-244}"
  local file="$dir/node_targets.json"
  local i

  mkdir -p -- "$dir" || return 1

  # Build the whole array in a single redirection; a comma follows every
  # entry except the last, keeping the output valid JSON.
  {
    echo '['
    for ((i = start; i <= end; i++)); do
      if ((i == end)); then
        printf '  {"targets": ["%s.%d:9100"], "labels": {}}\n' "$base_ip" "$i"
      else
        printf '  {"targets": ["%s.%d:9100"], "labels": {}},\n' "$base_ip" "$i"
      fi
    done
    echo ']'
  } > "$file" || return 1

  echo "Generated targets file: $file"
  echo "Total targets: $((end - start + 1))"
}

generate_node_targets "$@"

# Edit the Prometheus configuration (the package installs it under /etc/prometheus,
# not /etc/promethus).
vim /etc/prometheus/prometheus.yml

复制代码
# Sample config for Prometheus.

global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
      monitor: 'example'

# Alertmanager configuration
# NOTE(review): this guide does not install Alertmanager; the localhost:9093
# target below will be reported as down unless Alertmanager is set up separately.
alerting:
  alertmanagers:
  - static_configs:
    - targets: ['localhost:9093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    scrape_timeout: 5s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']

  - job_name: node
    # If prometheus-node-exporter is installed, grab stats about the local
    # machine by default.
    # File-based service discovery: the JSON list below is the file produced
    # by the node.sh generator script; Prometheus re-reads it every 5 minutes,
    # so new hosts can be added without restarting the server.
    file_sd_configs:
      - files:
          - '/etc/prometheus/targets/node_targets.json'  
        refresh_interval: 5m

将 Grafana 界面改为中文

复制代码
# Switch the Grafana UI to Simplified Chinese.
# NOTE: /usr/share/grafana/conf/defaults.ini is owned by the package and is
# overwritten on every upgrade — persist overrides in /etc/grafana/grafana.ini
# instead (the setting ships there as a commented ';default_language' line).
sudo sed -i 's/^;\?default_language = en-US/default_language = zh-Hans/' /etc/grafana/grafana.ini

重启

复制代码
# Reload Prometheus so it picks up the new scrape configuration.
systemctl restart prometheus

# Restart Grafana so the language setting takes effect.
systemctl restart  grafana-server.service

在浏览器访问 http://<服务器IP>:3000 打开 Grafana 页面

相关推荐
顶点多余17 分钟前
进程间通信 --- 共享内存篇(通信速度最快)
linux·服务器·jvm
co_wait27 分钟前
【c 语言】linux下gcc编译工具的使用
linux·c语言·开发语言
liulilittle28 分钟前
LINUX RING BUFFER TUN/TAP 1
linux·服务器·网络·c++·信息与通信·通信
supersolon30 分钟前
WSL2(Linux)升级docker
linux·运维·docker·wsl·升级
路由侠内网穿透.1 小时前
本地部署开源书签管理工具 LinkAce 并实现外部访问( Linux 版本)
linux·运维·服务器·网络·网络协议·开源
小飞菜涅1 小时前
fast-lio2复现
嵌入式硬件·学习·ubuntu
ljh5746491192 小时前
linux awk 命令
linux·运维·chrome
向依阳2 小时前
RV1126准备-----编译和测试SDK自带的RKNN例程
linux·rv1226
the sun342 小时前
Linux驱动开发:环境准备与报错处理
linux·运维·服务器
MC_J2 小时前
Linux 6.1 移植RTL8723du驱动
linux·arm