【grafana】Grafana Image Renderer插件获取grafana仪表盘图片(docker方式、python爬虫)

1、编写docker-compose.yml文件

#如果你已启动 grafana 容器,只需修改 docker-compose.yml:在 grafana 服务中添加渲染相关的环境变量,并新增 grafana-image-renderer 插件的服务。官网地址:Grafana Image Renderer plugin for Grafana | Grafana Labs。官方建议该插件预留 16G 内存,但经测试 4c8g 规格也可正常运行

bash 复制代码
# docker-compose for Grafana + the image-renderer plugin service.
# NOTE: `services:` must be a top-level key (the original snippet indented it
# under `version:`, which is invalid YAML for compose).
version: '2'
services:
  grafana:
    image: grafana/grafana:latest
    ports:
      - '3000:3000'
    environment:
      # Where Grafana sends render requests (the renderer service below).
      GF_RENDERING_SERVER_URL: http://renderer:8081/render
      # Where the renderer calls back into Grafana to fetch the dashboard.
      GF_RENDERING_CALLBACK_URL: http://grafana:3000/
      GF_LOG_FILTERS: rendering:debug
  renderer:
    image: grafana/grafana-image-renderer:latest
    ports:
      - 8081:8081

2、启动docker-compose.yml

bash 复制代码
docker-compose up -d

3、测试图片

4、写python脚本批量获取图片

bash 复制代码
[root@dt1 bin]# cat download_dashboard.py
# -*- coding: utf-8 -*-

import requests
import os
import time
from datetime import datetime, timedelta

# Parent directory of the current working directory (assumes the script is
# launched from a subdirectory such as bin/ -- TODO confirm against deployment).
script_dir = os.path.dirname(os.getcwd())
#script_dir = os.path.dirname(os.path.abspath(__file__))
# Directory where downloaded dashboard images are stored.
# Fix: pass "png" as a separate argument -- the original concatenated the
# separator into a single string, defeating the point of os.path.join.
target_dir = os.path.join(script_dir, "png")

def get_timestamp(date_time):
    """Return the Unix timestamp of *date_time* in milliseconds.

    The datetime is interpreted in local time; sub-second precision is
    dropped, so the result is always a whole-second value times 1000.
    """
    whole_seconds = int(time.mktime(date_time.timetuple()))
    return whole_seconds * 1000
# Number of days of dashboard history to fetch.
days=7
# Current timestamp in milliseconds.
time_now=get_timestamp(datetime.now())
# Timestamp of `days` days ago, in milliseconds.
time_old=get_timestamp(datetime.now() - timedelta(days=days))
# EM host IP address (Grafana is reached through this host -- hardcoded,
# adjust per deployment).
em_ip="172.16.8.193"


def download_img_with_timeout(url, headers, img_name, timeout):
    """Download one rendered dashboard image into ``target_dir``.

    Parameters:
        url: full Grafana render URL (d-solo endpoint with time range/panelId).
        headers: HTTP headers dict (User-Agent / Cookie).
        img_name: file name to write under the module-level ``target_dir``.
        timeout: request timeout in seconds.

    Failures (timeout, connection error, non-200 status) are reported via
    print() and swallowed -- this is a best-effort batch script.
    """
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
        # Fix: the output directory is never created anywhere else in this
        # script, so the first run would crash on open(); create it here.
        os.makedirs(target_dir, exist_ok=True)
        target_path = os.path.join(target_dir, img_name)
        if response.status_code == 200:
            with open(target_path, 'wb') as f:
                f.write(response.content)
            print(img_name+"仪表盘图片下载成功")
        else:
            print("获取仪表盘图片失败,HTTP 状态码:", response.status_code)
    except requests.exceptions.Timeout:
        print("请求超时")
    except requests.exceptions.RequestException as e:
        print("请求异常:", e)

# HTTP request headers. The Cookie may be unnecessary depending on the
# deployment. NOTE(review): the em_token below is a hardcoded JWT that
# expires -- regenerate it before use, and avoid committing real credentials.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0",
    "Cookie": "dtstack=test,em_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MDk3MDU5NjcsInNlc3Npb25faGFzaF9pZCI6ImNmY2QyMDg0OTVkNTY1ZWY2NmU3ZGZmOWY5ODc2NGRhIiwidXNlcl9pZCI6MSwidXNlcl9uYW1lIjoiYWRtaW5AZHRzdGFjay5jb20iLCJ1c2VyX3Blcm1pc3Npb24iOjF9.rGjRbK-61VT-t38OXeBtmzoB9CmIGhEUdD8Nf0Crs1Y; em_username=admin@dtstack.com;"
}

# Dashboard list. Each value is a 3-element list:
#   [0] dashboard path + query prefix ending in "from=" (time range appended later)
#   [1] panelId of the panel to render (as a string)
#   [2] output image file name
url_img_map = {
  "dashboard_disk":["Ne_roaViz/host_overview?theme=light&orgId=1&from=","44","DiskUsed.png"],
  "dashboard_mem":["Ne_roaViz/host_overview?theme=light&orgId=1&from=","50","MemUsed.png"],
  "dashboard_systemload":["Ne_roaViz/host_overview?theme=light&orgId=1&from=","38","SystemLoad.png"],
  "dashboard_uic":["qVfmgmTqf/dtuic_overview?theme=light&orgId=1&from=","210","uic_gc_count.png"],
  "dashboard_publicserver":["qVfmgmTqv/publicservice_overview?theme=light&orgId=1&from=","210","publcserver_gc_count.png"],
  "dashboard_schedulex":["qVfmgmTqg/engine_overview?theme=light&orgId=1&from=","210","schedulex_gc_count.png"],
  "dashboard_metadata":["qVfmgmTqc/metadata_overview?theme=light&orgId=1&from=","210","metadata_gc_count.png"],
  "dashboard_gateway":["qVfmgmTqh/gateway_overview?theme=light&orgId=1&from=","210","gateway_gc_count.png"],
  "dashboard_batch":["qVfmgmTqd/batch_overview?theme=light&orgId=1&from=","210","batch_gc_count.png"],
  "dashboard_stream":["qVfmgmTqn/stream_overview?theme=light&orgId=1&from=","210","stream_gc_count.png"],
  "dashboard_assets":["qVfmgmT4z-assets/assets_overview?theme=light&orgId=1&from=","210","assets_gc_count.png"],
  "dashboard_api":["qVfmgmT4z/api_overview?theme=light&orgId=1&from=","210","api_gc_count.png"],
  # easyindex entry was left unfinished/disabled in the original:
  #"dashboard_easyindex":["
  "dashboard_tag":["qVfmgmTqm/tag_overview?theme=light&orgId=1&from=","210","tag_gc_count.png"]
}
# Render and download every panel listed in url_img_map.
for key, value in url_img_map.items():
  # Unpack [dashboard path prefix, panelId, output file name]; the original
  # used `id` as a name, shadowing the builtin.
  dash_path, panel_id, img_name = value
  full_url = ("http://" + em_ip + "/grafana/render/d-solo/" + dash_path
              + str(time_old) + "&to=" + str(time_now)
              + "&panelId=" + panel_id
              + "&width=1000&height=500&tz=Asia%2FShanghai")
  download_img_with_timeout(full_url, headers, img_name, timeout=5)

把图片加载到doc文档

bash 复制代码
[root@dt1 bin]# cat doc.py
# -*- coding: utf-8 -*-
from docx import Document
from docx.shared import Inches
import os


# Create a new Word document that will hold the inspection report.
doc = Document()

# Parent directory of the current working directory (same convention as
# download_dashboard.py -- assumes launch from a subdirectory like bin/).
script_dir = os.path.dirname(os.getcwd())
# Directory holding the dashboard images downloaded by download_dashboard.py.
image_path = os.path.join(script_dir,"png")

# Chapter 2: server-level metrics. Each entry renders a level-2 heading,
# a level-3 heading, then the matching chart image -- the same call
# sequence the original copy-pasted blocks produced.
doc.add_heading(u'二、服务器层面', level=1)
server_sections = [
    (u'2.1 磁盘', u'2.1.1 磁盘使用率', "DiskUsed.png"),
    (u'2.2 内存', u'2.2.1 内存使用率', "MemUsed.png"),
    (u'2.3 CPU', u'2.3.1 cpu负载情况', "SystemLoad.png"),
]
for section_title, sub_title, img in server_sections:
    doc.add_heading(section_title, level=2)
    doc.add_heading(sub_title, level=3)
    doc.add_picture(image_path + "/" + img, width=Inches(3))

# Chapter 4: one level-2 heading per DataStack component followed by its
# GC-count chart. A None image means the chart is not collected
# (EasyIndex's picture was commented out in the original).
doc.add_heading(u'四、数栈服务组件', level=1)
component_sections = [
    (u'4.1 DTUic', "uic_gc_count.png"),
    (u'4.2 DTPublicservice', "publcserver_gc_count.png"),
    (u'4.3 DTSchedulex', "schedulex_gc_count.png"),
    (u'4.4 DTMetadata', "metadata_gc_count.png"),
    (u'4.5 DTGateway', "gateway_gc_count.png"),
    (u'4.6 DTBatch', "batch_gc_count.png"),
    (u'4.7 DTStream', "stream_gc_count.png"),
    (u'4.8 DTAssets', "assets_gc_count.png"),
    (u'4.9 DTApi', "api_gc_count.png"),
    (u'4.10 DTEasyIndex', None),
    (u'4.11 DTTag', "tag_gc_count.png"),
]
for section_title, img in component_sections:
    doc.add_heading(section_title, level=2)
    if img is not None:
        doc.add_picture(image_path + "/" + img, width=Inches(3))

# Fix: the output directory is never created elsewhere, so doc.save would
# fail on a fresh checkout; create it first, then save the report.
os.makedirs(os.path.join(script_dir, "doc"), exist_ok=True)
doc.save(script_dir + "/doc/output.docx")
相关推荐
coberup4 分钟前
django Forbidden (403)错误解决方法
python·django·403错误
川石课堂软件测试27 分钟前
性能测试|docker容器下搭建JMeter+Grafana+Influxdb监控可视化平台
运维·javascript·深度学习·jmeter·docker·容器·grafana
龙哥说跨境35 分钟前
如何利用指纹浏览器爬虫绕过Cloudflare的防护?
服务器·网络·python·网络爬虫
小白学大数据1 小时前
正则表达式在Kotlin中的应用:提取图片链接
开发语言·python·selenium·正则表达式·kotlin
flashman9111 小时前
python在word中插入图片
python·microsoft·自动化·word
菜鸟的人工智能之路1 小时前
桑基图在医学数据分析中的更复杂应用示例
python·数据分析·健康医疗
懒大王爱吃狼2 小时前
Python教程:python枚举类定义和使用
开发语言·前端·javascript·python·python基础·python编程·python书籍
秃头佛爷3 小时前
Python学习大纲总结及注意事项
开发语言·python·学习
深度学习lover4 小时前
<项目代码>YOLOv8 苹果腐烂识别<目标检测>
人工智能·python·yolo·目标检测·计算机视觉·苹果腐烂识别
API快乐传递者5 小时前
淘宝反爬虫机制的主要手段有哪些?
爬虫·python