apt install -y prometheus
apt install -y prometheus-node-exporter
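The node exporter listens on port 9100 by default (the same port used in the targets file below). An optional quick check that it is serving metrics, assuming curl is installed:
curl -s http://localhost:9100/metrics | head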
sudo wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -
echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list
sudo apt-get update
sudo apt install -y grafana
systemctl status grafana-server
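Depending on the package, the services may not be enabled to start at boot automatically. An optional step to enable and start all three explicitly:
sudo systemctl enable --now prometheus prometheus-node-exporter grafana-server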
Script to generate the target IP list
Create the script node.sh under the /etc/prometheus/ directory:
#!/bin/bash
# Variables
TARGETS_DIR="/etc/prometheus/targets"          # Prometheus file_sd directory
TARGETS_FILE="$TARGETS_DIR/node_targets.json"  # Path of the generated targets file
BASE_IP="10.10.11"                             # IP prefix of the subnet
START=22                                       # First host address
END=244                                        # Last host address

# Create the directory if it does not exist
mkdir -p "$TARGETS_DIR"

# Start the JSON array
echo '[' > "$TARGETS_FILE"

# Generate one entry per IP address
for i in $(seq "$START" "$END"); do
    # The last entry must not be followed by a comma
    if [ "$i" -eq "$END" ]; then
        echo "  {\"targets\": [\"$BASE_IP.$i:9100\"], \"labels\": {}}" >> "$TARGETS_FILE"
    else
        echo "  {\"targets\": [\"$BASE_IP.$i:9100\"], \"labels\": {}}," >> "$TARGETS_FILE"
    fi
done

# Close the JSON array
echo ']' >> "$TARGETS_FILE"

echo "Generated targets file: $TARGETS_FILE"
echo "Total targets: $((END - START + 1))"
vim /etc/prometheus/prometheus.yml
# Sample config for Prometheus.

global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'example'

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['localhost:9093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    scrape_timeout: 5s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']

  - job_name: node
    # If prometheus-node-exporter is installed, grab stats about the local
    # machine by default.
    file_sd_configs:
      - files:
          - '/etc/prometheus/targets/node_targets.json'
        refresh_interval: 5m
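Before restarting, the configuration can be validated with promtool, which is installed alongside Prometheus:
promtool check config /etc/prometheus/prometheus.yml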
Switch the Grafana UI to Chinese
sed -i 's/default_language = en-US/default_language = zh-Hans/g' /usr/share/grafana/conf/defaults.ini
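To confirm the substitution took effect:
grep default_language /usr/share/grafana/conf/defaults.ini
Note that defaults.ini may be replaced on package upgrades; setting the same key in /etc/grafana/grafana.ini (in the section where it appears in defaults.ini) makes the change persistent.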
Restart the services
systemctl restart prometheus
systemctl restart grafana-server.service
Open http://<server-ip>:3000 in a browser to access the Grafana UI.
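As a final check, the Prometheus HTTP API can report how many targets are currently up (run on the Prometheus host; assumes curl is available):
curl -s 'http://localhost:9090/api/v1/query?query=sum(up)'
The returned value counts the node exporters that are reachable, plus the Prometheus self-scrape.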