# draw_tensor2psd.py — 0126v1

import cv2

import numpy as np

import math

import os

import struct

from tqdm import tqdm

from glob import glob

# Random BGR palette, one row per class id (values in 0-254 per channel).
PALETTE = np.random.randint(0, 255, [255, 3], dtype=np.uint32)

# Model input size (W, H), used to scale model-space coordinates back to
# the original image resolution.
MODEL_IN_W = 608
MODEL_IN_H = 736

# Debug: additionally emit a visualization at the model-input resolution.
DEBUG_DRAW_MODEL_SIZE = True

# NOTE(review): the first path is dead — it was immediately overwritten by
# the second assignment; kept here (commented) for reference.
# imagespath = "/ai/DataSets/OD_FSD_zh/TI_test/rm/4/image/"
imagespath = "/ai/DataSets/OD_FSD_zh/TI_test/ppchen/DVR-20250804152834-2382380-PLR/image/"
savepath = "/ai/zhdata/multiyolov5_point_v2/test_images/out"

# Running count of tensors that contained out-of-range (corrupt) coordinates.
zh1 = 0

def _bits_to_float(bits):
    """Reinterpret a 32-bit integer bit pattern as an IEEE-754 float32.

    The raw tensor file is read as int32; scalar outputs (confidence,
    class id, attribute scores) are float32 values stored bit-for-bit in
    those ints. Masking with 0xFFFFFFFF makes negative int32 values
    round-trip correctly (the previous bin()/to_bytes() trick raised
    OverflowError on any negative value).
    """
    return struct.unpack('>f', struct.pack('>I', int(bits) & 0xFFFFFFFF))[0]


def _draw_quad(img, xs, ys, first_color, rest_color):
    """Draw a slot quadrilateral as four arrowed edges.

    The entrance edge (corner 0 -> corner 1) is drawn in first_color so the
    slot orientation stays visible; the remaining edges use rest_color.
    """
    pts = list(zip(xs, ys))
    colors = (first_color, rest_color, rest_color, rest_color)
    for k in range(4):
        cv2.arrowedLine(img, pts[k], pts[(k + 1) % 4], colors[k], 1, cv2.LINE_AA)


def readTensor(tensorFile):
    """Parse one detection tensor (.bin), draw parking slots on the matching
    image and write the visualization to /ai/TopViewMul/4/psd_out/.

    Each slot is a record of 20 int32 values:
      [4]      confidence (float32 bits)
      [5]      class id (float32 bits)
      [6..13]  corner coordinates x1,y1 .. x4,y4 in model-input pixels
      [14..19] attribute scores (float32 bits): occupied, VIP, woman,
               disabled, charging, step.

    Side effects: increments the global parse-error counter ``zh1`` and
    writes one (or, in debug mode, two) .bmp files per tensor.
    """
    global zh1
    with open(tensorFile, 'rb') as tensor:
        infer_data = np.fromfile(tensor, dtype=np.int32)
    print(infer_data.shape)
    soltnum = int(len(infer_data) / 20)

    im0 = cv2.imread(imagespath + tensorFile.split('/')[-1][:-8] + '.bmp')
    if im0 is None:
        print("读取图片失败:", tensorFile)
        return
    h0, w0 = im0.shape[:2]
    # Scale factors from model-input coordinates back to the original image.
    sx = w0 / float(MODEL_IN_W)
    sy = h0 / float(MODEL_IN_H)

    point_all = []
    for i in range(soltnum):
        rec = infer_data[20 * i:20 * i + 20]
        # Corner coordinates, clamped to >= 0 both before and after scaling.
        raw_x = [max(int(rec[6 + 2 * k]), 0) for k in range(4)]
        raw_y = [max(int(rec[7 + 2 * k]), 0) for k in range(4)]
        xs = [max(int(v * sx), 0) for v in raw_x]
        ys = [max(int(v * sy), 0) for v in raw_y]

        point_dict1 = {
            "conf": _bits_to_float(rec[4]),
            "isOccupied": _bits_to_float(rec[14]),
            "isVIP": _bits_to_float(rec[15]),
            "iswoman": _bits_to_float(rec[16]),
            "isdisabled": _bits_to_float(rec[17]),
            "ischarging": _bits_to_float(rec[18]),
            "step": _bits_to_float(rec[19]),
            "name": str(_bits_to_float(rec[5])),
            # 0 = keep, 1 = duplicate, 2 = bad geometry (set below).
            "delrule": 0,
            "pointx": xs,
            "pointy": ys,
        }
        if DEBUG_DRAW_MODEL_SIZE:
            point_dict1["raw_pointx"] = raw_x
            point_dict1["raw_pointy"] = raw_y

        # Any coordinate past 1000 px means the record is corrupt; count it
        # and stop parsing this tensor (later records are unreliable too).
        if any(v > 1000 for v in xs) or any(v > 1000 for v in ys):
            zh1 += 1
            print("数据解析错误" + tensorFile + str(xs[2]) + '--' + str(zh1))
            break
        point_all.append(point_dict1)

    # NMS-like dedup: two slots whose entrance corners (points 0 and 1) lie
    # within 40 px (L1 distance) of each other are considered duplicates.
    end = len(point_all)
    for i in range(end):
        if point_all[i]["delrule"] != 0:
            continue
        for j in range(i + 1, end):
            xi, yi = point_all[i]['pointx'], point_all[i]['pointy']
            xj, yj = point_all[j]['pointx'], point_all[j]['pointy']
            if (abs(xi[0] - xj[0]) + abs(yi[0] - yj[0])) < 40 \
                    or (abs(xi[1] - xj[1]) + abs(yi[1] - yj[1])) < 40:
                point_all[j]["delrule"] = 1

    # Geometry filter: the two side edges (corner 0->3 and corner 1->2) of a
    # valid slot should be near-parallel; reject when they diverge > 20 deg.
    for p in point_all:
        if p["delrule"] != 0:
            continue
        vec1 = [p['pointx'][3] - p['pointx'][0], p['pointy'][3] - p['pointy'][0]]
        vec2 = [p['pointx'][2] - p['pointx'][1], p['pointy'][2] - p['pointy'][1]]
        dot_product = vec1[0] * vec2[0] + vec1[1] * vec2[1]
        m1 = math.sqrt(vec1[0] ** 2 + vec1[1] ** 2) + 1e-12
        m2 = math.sqrt(vec2[0] ** 2 + vec2[1] ** 2) + 1e-12
        # Clamp into the acos domain [-1, 1]. BUGFIX: the original clamped
        # the low side to -15, which made math.acos raise ValueError for
        # anti-parallel edges.
        val = min(1.0, max(-1.0, dot_product / (m1 * m2)))
        if math.degrees(math.acos(val)) > 20:
            p["delrule"] = 2

    for point_i in point_all:
        px, py = point_i["pointx"], point_i["pointy"]
        anchor = (px[0] + 6, py[0] + 6)
        if point_i["delrule"] == 0 and point_i["conf"] > 0.45:  # conf threshold 0.45
            print(point_i["conf"])
            cv2.putText(im0, f'{point_i["conf"]:.3f}', anchor,
                        cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # confidence
            cv2.putText(im0, point_i["name"], (px[0] + 6, py[0] + 30),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # class
            # Attribute labels stack downward at fixed 24-px steps; each line
            # keeps its slot position even when earlier lines are skipped.
            # isOccupied uses a lower threshold (0.1) than the others (0.5).
            attrs = (("isOccupied", "Occ :", 0.1),
                     ("isVIP", "VIP :", 0.5),
                     ("iswoman", "woman :", 0.5),
                     ("isdisabled", "disab :", 0.5),
                     ("ischarging", "charg :", 0.5),
                     ("step", "step :", 0.5))
            y_off = 54
            for key, label, thr in attrs:
                if float(point_i[key]) > thr:
                    cv2.putText(im0, label + f'{point_i[key]:.3f}',
                                (px[0] + 6, py[0] + y_off),
                                cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
                y_off += 24
            _draw_quad(im0, px, py, (0, 255, 0), (255, 255, 0))
        elif point_i["delrule"] == 0:
            # Kept slot below the confidence threshold: red score, red quad.
            cv2.putText(im0, f'{point_i["conf"]:.3f}', anchor,
                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)
            _draw_quad(im0, px, py, (0, 0, 255), (0, 0, 255))
        else:
            # delrule 1 (duplicate) or 2 (bad geometry): red score, black quad.
            cv2.putText(im0, f'{point_i["conf"]:.3f}', anchor,
                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)
            _draw_quad(im0, px, py, (0, 0, 0), (0, 0, 0))

    zh = "/ai/TopViewMul/4/psd_out/" + tensorFile.split('/')[-1][:-8] + '.bmp'
    print(zh)
    cv2.imwrite(zh, im0)

    if DEBUG_DRAW_MODEL_SIZE:
        # Debug view: resize the annotated image to the model-input size and
        # redraw accepted slots using the raw (model-space) coordinates.
        im_model = cv2.resize(im0, (MODEL_IN_W, MODEL_IN_H))
        for point_i in point_all:
            if point_i.get("raw_pointx") is None:
                continue
            if point_i["delrule"] == 0 and point_i["conf"] > 0.45:
                _draw_quad(im_model, point_i["raw_pointx"], point_i["raw_pointy"],
                           (0, 255, 0), (255, 255, 0))
        zh_model = "/ai/TopViewMul/4/psd_out/" + tensorFile.split('/')[-1][:-8] + '_model.bmp'
        cv2.imwrite(zh_model, im_model)

# Batch-render every detection tensor from the first capture set.
# (A stray bare `zh` statement here raised NameError at module scope; removed.)
for binpath in tqdm(glob(os.path.join("/ai/DataSets/OD_FSD_zh/TI_test/rm/2/psd/", '*.bin'))):
    readTensor(binpath)

# Second capture set: stop after 100 tensors.
num = 0
for binpath in tqdm(glob(os.path.join("/ai/TopViewMul/4/out_480_544_2039/psd/", '*.bin'))):
    readTensor(binpath)
    num += 1
    if num == 100:
        print('exit')
        exit(0)

# --- scrape residue (blog "related recommendations" links), commented out so
# --- the file parses; preserved verbatim below:
# 相关推荐
# weixin_395448919 小时前
# draw_tensor2psd.py——0126v2
# 支持向量机·逻辑回归·启发式算法
# 救救孩子把11 小时前
# 63-机器学习与大模型开发数学教程-5-10 最优化在机器学习中的典型应用(逻辑回归、SVM)
# 机器学习·支持向量机·逻辑回归
# 机器学习之心11 小时前
# 集群中继无人机应急通信双层多目标协同优化部署:融合无监督学习与凸优化及启发式算法的MATLAB代码
# 学习·无人机·启发式算法·双层多目标协同优化
# 开开心心_Every1 天前
# 手机PDF处理工具:支持格式转换与批注
# 游戏·微信·智能手机·pdf·逻辑回归·excel·语音识别
# Brduino脑机接口技术答疑3 天前
# 脑机接口数据处理连载(九) 经典分类算法(一):支持向量机(SVM)数据建模——基于脑机接口(BCI)运动想象任务实战
# 支持向量机·分类·数据挖掘
# 开开心心_Every3 天前
# 时间自动校准工具:一键同步网络服务器时间
# 游戏·随机森林·微信·pdf·逻辑回归·excel·语音识别
# (; ̄ェ ̄)。5 天前
# 机器学习入门(十一)逻辑回归,分类问题评估
# 机器学习·分类·逻辑回归
# 最低调的奢华5 天前
# 支持向量机和xgboost及卡方分箱解释
# 算法·机器学习·支持向量机
# Lips6116 天前
# 第六章 支持向量机
# 算法·机器学习·支持向量机