【grafana】Fetching Grafana dashboard images with the Grafana Image Renderer plugin (Docker deployment + Python scraper)

1. Write the docker-compose.yml file

# If you already have a Grafana container running, you only need to modify docker-compose.yml: add the rendering environment variables to the grafana service and add a separate service for the grafana-image-renderer plugin. Official page: Grafana Image Renderer plugin for Grafana | Grafana Labs. The docs state the plugin needs 16 GB of memory, but in my tests it also ran fine on a 4-core / 8 GB host.

yaml
version: '2'
services:
  grafana:
    image: grafana/grafana:latest
    ports:
      - '3000:3000'
    environment:
      GF_RENDERING_SERVER_URL: http://renderer:8081/render
      GF_RENDERING_CALLBACK_URL: http://grafana:3000/
      GF_LOG_FILTERS: rendering:debug
  renderer:
    image: grafana/grafana-image-renderer:latest
    ports:
      - '8081:8081'

2. Start the services with docker-compose

bash
docker-compose up -d
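
Once the stack is up, it is worth checking that both containers started and that the renderer is listening. A quick sanity check, assuming the compose file above:

bash
# List the containers started by this compose file
docker-compose ps

# Tail the renderer logs; a healthy renderer reports its HTTP server listening on port 8081
docker-compose logs --tail=20 renderer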

3. Test rendering an image
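
In the Grafana UI you can open any panel, choose Share, and use the "Direct link rendered image" option; if that returns a PNG, the renderer is working. The same check can be done from the command line. A minimal sketch, assuming the default admin/admin credentials and the host_overview dashboard used later in this post:

bash
# Request a single panel as a PNG through the image renderer
curl -u admin:admin -o test.png \
  "http://localhost:3000/render/d-solo/Ne_roaViz/host_overview?orgId=1&panelId=44&width=1000&height=500&tz=Asia%2FShanghai"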

4. Write a Python script to download the images in batches

python
[root@dt1 bin]# cat download_dashboard.py
# -*- coding: utf-8 -*-

import requests
import os
import time
from datetime import datetime, timedelta

# Parent directory of the current working directory
script_dir = os.path.dirname(os.getcwd())
#script_dir = os.path.dirname(os.path.abspath(__file__))
# Directory where the panel images are saved; create it if it does not exist yet
target_dir = os.path.join(script_dir, "png")
if not os.path.exists(target_dir):
    os.makedirs(target_dir)

# Convert a datetime to a millisecond Unix timestamp
def get_timestamp(date_time):
    return int(time.mktime(date_time.timetuple())) * 1000

# How many days back the dashboards should cover
days = 7
# Current timestamp
time_now = get_timestamp(datetime.now())
# Timestamp of `days` days ago
time_old = get_timestamp(datetime.now() - timedelta(days=days))
# EM host IP address
em_ip = "172.16.8.193"


## Download a single dashboard panel image
def download_img_with_timeout(url, headers, img_name, timeout):
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
        target_path = os.path.join(target_dir, img_name)
        if response.status_code == 200:
            with open(target_path, 'wb') as f:
                f.write(response.content)
            print(img_name + " dashboard image downloaded successfully")
        else:
            print("Failed to fetch dashboard image, HTTP status code:", response.status_code)
    except requests.exceptions.Timeout:
        print("Request timed out")
    except requests.exceptions.RequestException as e:
        print("Request exception:", e)

# Request headers; the Cookie can be omitted depending on how Grafana authentication is set up
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0",
    "Cookie": "dtstack=test,em_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MDk3MDU5NjcsInNlc3Npb25faGFzaF9pZCI6ImNmY2QyMDg0OTVkNTY1ZWY2NmU3ZGZmOWY5ODc2NGRhIiwidXNlcl9pZCI6MSwidXNlcl9uYW1lIjoiYWRtaW5AZHRzdGFjay5jb20iLCJ1c2VyX3Blcm1pc3Npb24iOjF9.rGjRbK-61VT-t38OXeBtmzoB9CmIGhEUdD8Nf0Crs1Y; em_username=admin@dtstack.com;"
}

# Map: dashboard key -> [dashboard path prefix, panel id, output file name]
url_img_map = {
  "dashboard_disk":["Ne_roaViz/host_overview?theme=light&orgId=1&from=","44","DiskUsed.png"],
  "dashboard_mem":["Ne_roaViz/host_overview?theme=light&orgId=1&from=","50","MemUsed.png"],
  "dashboard_systemload":["Ne_roaViz/host_overview?theme=light&orgId=1&from=","38","SystemLoad.png"],
  "dashboard_uic":["qVfmgmTqf/dtuic_overview?theme=light&orgId=1&from=","210","uic_gc_count.png"],
  "dashboard_publicserver":["qVfmgmTqv/publicservice_overview?theme=light&orgId=1&from=","210","publcserver_gc_count.png"],
  "dashboard_schedulex":["qVfmgmTqg/engine_overview?theme=light&orgId=1&from=","210","schedulex_gc_count.png"],
  "dashboard_metadata":["qVfmgmTqc/metadata_overview?theme=light&orgId=1&from=","210","metadata_gc_count.png"],
  "dashboard_gateway":["qVfmgmTqh/gateway_overview?theme=light&orgId=1&from=","210","gateway_gc_count.png"],
  "dashboard_batch":["qVfmgmTqd/batch_overview?theme=light&orgId=1&from=","210","batch_gc_count.png"],
  "dashboard_stream":["qVfmgmTqn/stream_overview?theme=light&orgId=1&from=","210","stream_gc_count.png"],
  "dashboard_assets":["qVfmgmT4z-assets/assets_overview?theme=light&orgId=1&from=","210","assets_gc_count.png"],
  "dashboard_api":["qVfmgmT4z/api_overview?theme=light&orgId=1&from=","210","api_gc_count.png"],
  #"dashboard_easyindex":["
  "dashboard_tag":["qVfmgmTqm/tag_overview?theme=light&orgId=1&from=","210","tag_gc_count.png"]
}
## Download every panel image in the map
for key, value in url_img_map.items():
    url_prefix = value[0]
    panel_id = value[1]
    img_name = value[2]
    # Assemble the full render URL; from/to are the millisecond timestamps computed above
    url = "http://" + em_ip + "/grafana/render/d-solo/" + url_prefix + str(time_old) + "&to=" + str(time_now) + "&panelId=" + panel_id + "&width=1000&height=500&tz=Asia%2FShanghai"
    #print(url, img_name)
    # 5 seconds can be tight for slow renders; raise the timeout if requests keep timing out
    download_img_with_timeout(url, headers, img_name, timeout=5)
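
Each iteration builds a render URL of the form http://<em_ip>/grafana/render/d-solo/<dashboard>?theme=light&orgId=1&from=<time_old>&to=<time_now>&panelId=<panel id>&width=1000&height=500&tz=Asia%2FShanghai and saves the response body as a PNG. To run the downloader, assuming the bin/ plus png/ directory layout implied by the paths above:

bash
# Run from the bin/ directory; the images land in ../png/
python download_dashboard.py
ls ../png/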

5. Insert the downloaded images into a Word document

python
[root@dt1 bin]# cat doc.py
# -*- coding: utf-8 -*-
from docx import Document
from docx.shared import Inches
import os


# Create a new Word document
doc = Document()

# Parent directory of the current working directory
script_dir = os.path.dirname(os.getcwd())
# Directory holding the downloaded dashboard images
image_path = os.path.join(script_dir, "png")

#
title1 = u'2. Servers'
title2 = u'2.1 Disk'
title3 = u'2.1.1 Disk usage'
doc.add_heading(title1, level=1)
doc.add_heading(title2, level=2)
doc.add_heading(title3, level=3)
doc.add_picture(image_path+"/DiskUsed.png", width=Inches(3))
#
title2 = u'2.2 Memory'
title3 = u'2.2.1 Memory usage'
doc.add_heading(title2, level=2)
doc.add_heading(title3, level=3)
doc.add_picture(image_path+"/MemUsed.png", width=Inches(3))
#
title2 = u'2.3 CPU'
title3 = u'2.3.1 CPU load'
doc.add_heading(title2, level=2)
doc.add_heading(title3, level=3)
doc.add_picture(image_path+"/SystemLoad.png", width=Inches(3))

#
title1 = u'4. DTStack service components'
title2 = u'4.1 DTUic'
doc.add_heading(title1, level=1)
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/uic_gc_count.png", width=Inches(3))
title2 = u'4.2 DTPublicservice'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/publcserver_gc_count.png", width=Inches(3))
title2 = u'4.3 DTSchedulex'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/schedulex_gc_count.png", width=Inches(3))
title2 = u'4.4 DTMetadata'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/metadata_gc_count.png", width=Inches(3))
title2 = u'4.5 DTGateway'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/gateway_gc_count.png", width=Inches(3))
title2 = u'4.6 DTBatch'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/batch_gc_count.png", width=Inches(3))
title2 = u'4.7 DTStream'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/stream_gc_count.png", width=Inches(3))
title2 = u'4.8 DTAssets'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/assets_gc_count.png", width=Inches(3))
title2 = u'4.9 DTApi'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/api_gc_count.png", width=Inches(3))
title2 = u'4.10 DTEasyIndex'
doc.add_heading(title2, level=2)
#doc.add_picture(image_path+"/easyindex_gc_count.png", width=Inches(3))
title2 = u'4.11 DTTag'
doc.add_heading(title2, level=2)
doc.add_picture(image_path+"/tag_gc_count.png", width=Inches(3))


# Save the Word document, creating the output directory if it does not exist
output_dir = os.path.join(script_dir, "doc")
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
doc.save(os.path.join(output_dir, "output.docx"))
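
Both scripts rely on third-party packages; if they are not already installed, they can be added with pip:

bash
pip install requests python-docx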