draw_tensor2psd.py0126v1

import cv2
import numpy as np
import math
import os
import struct
from tqdm import tqdm
from glob import glob

PALETTE = np.random.randint(0, 255, [255, 3], dtype=np.uint32)

# Model input size (W, H), used to scale model coordinates back to the original image
MODEL_IN_W = 608
MODEL_IN_H = 736

# Debug: also write out a visualization at the model input size
DEBUG_DRAW_MODEL_SIZE = True

# imagespath = "/ai/DataSets/OD_FSD_zh/TI_test/rm/4/image/"  # alternative test set (overridden below)
imagespath = "/ai/DataSets/OD_FSD_zh/TI_test/ppchen/DVR-20250804152834-2382380-PLR/image/"
savepath = "/ai/zhdata/multiyolov5_point_v2/test_images/out"  # note: readTensor() below writes to its own hard-coded output path

zh1 = 0  # counter of tensors with out-of-range (mis-parsed) coordinates
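
# A minimal sketch (not part of the original script) of what the repeated
# struct.unpack(...) expression inside readTensor() computes: it reinterprets the
# raw int32 word read from the tensor file as an IEEE-754 float32. The name
# bits_to_float is a hypothetical helper shown only for illustration; the parsing
# loop below keeps the author's original inline expression.
def bits_to_float(word):
    # pack the value as a big-endian signed 32-bit integer, then unpack the same
    # four bytes as a big-endian float32
    return struct.unpack('!f', struct.pack('!i', int(word)))[0]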

def readTensor(tensorFile):
    global zh1
    with open(tensorFile, 'rb') as tensor:
        infer_data = np.fromfile(tensor, dtype=np.int32)
    print(infer_data.shape)
    soltnum = int(len(infer_data) / 20)  # each detection occupies 20 int32 words
    im0 = cv2.imread(imagespath + tensorFile.split('/')[-1][:-8] + '.bmp')
    if im0 is None:
        print("Failed to read image:", tensorFile)
        return
    h0, w0 = im0.shape[:2]
    sx = w0 / float(MODEL_IN_W)  # scale factors from model-input coordinates to the original image
    sy = h0 / float(MODEL_IN_H)
    point_all = []
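
    # Per-detection record layout (20 int32 words), as read by the loop below:
    #   word 4       -> confidence (float32 bits)
    #   word 5       -> class id (float32 bits)
    #   words 6..13  -> corner coordinates x1, y1, x2, y2, x3, y3, x4, y4 in model-input pixels
    #   words 14..19 -> attribute scores: occupied, VIP, woman, disabled, charging, step
    #   words 0..3 are not used by this script.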

    for i in range(soltnum):
        point_dict1 = {}
        raw_x1 = max(int(infer_data[20*i+6]), 0)
        raw_y1 = max(int(infer_data[20*i+7]), 0)
        raw_x2 = max(int(infer_data[20*i+8]), 0)
        raw_y2 = max(int(infer_data[20*i+9]), 0)
        raw_x3 = max(int(infer_data[20*i+10]), 0)
        raw_y3 = max(int(infer_data[20*i+11]), 0)
        raw_x4 = max(int(infer_data[20*i+12]), 0)
        raw_y4 = max(int(infer_data[20*i+13]), 0)
        # scale back to original-image coordinates
        x1 = max(int(raw_x1 * sx), 0)
        y1 = max(int(raw_y1 * sy), 0)
        x2 = max(int(raw_x2 * sx), 0)
        y2 = max(int(raw_y2 * sy), 0)
        x3 = max(int(raw_x3 * sx), 0)
        y3 = max(int(raw_y3 * sy), 0)
        x4 = max(int(raw_x4 * sx), 0)
        y4 = max(int(raw_y4 * sy), 0)
        # reinterpret the int32 bit patterns as float32 values
        zh = struct.unpack('!f', int(bin(infer_data[20*i+4])[2:], 2).to_bytes(4, byteorder='big'))
        point_dict1["conf"] = struct.unpack('!f', int(bin(infer_data[20*i+4])[2:], 2).to_bytes(4, byteorder='big'))[0]
        point_dict1["isOccupied"] = struct.unpack('!f', int(bin(infer_data[20*i+14])[2:], 2).to_bytes(4, byteorder='big'))[0]
        point_dict1["isVIP"] = struct.unpack('!f', int(bin(infer_data[20*i+15])[2:], 2).to_bytes(4, byteorder='big'))[0]
        point_dict1["iswoman"] = struct.unpack('!f', int(bin(infer_data[20*i+16])[2:], 2).to_bytes(4, byteorder='big'))[0]
        point_dict1["isdisabled"] = struct.unpack('!f', int(bin(infer_data[20*i+17])[2:], 2).to_bytes(4, byteorder='big'))[0]
        point_dict1["ischarging"] = struct.unpack('!f', int(bin(infer_data[20*i+18])[2:], 2).to_bytes(4, byteorder='big'))[0]
        point_dict1["step"] = struct.unpack('!f', int(bin(infer_data[20*i+19])[2:], 2).to_bytes(4, byteorder='big'))[0]
        point_dict1["name"] = str(struct.unpack('!f', int(bin(infer_data[20*i+5])[2:], 2).to_bytes(4, byteorder='big'))[0])
        point_dict1["delrule"] = 0  # 0 = keep, 1 = duplicate entrance, 2 = non-parallel sides
        point_dict1["pointx"] = [x1, x2, x3, x4]
        point_dict1["pointy"] = [y1, y2, y3, y4]
        if DEBUG_DRAW_MODEL_SIZE:
            point_dict1["raw_pointx"] = [raw_x1, raw_x2, raw_x3, raw_x4]
            point_dict1["raw_pointy"] = [raw_y1, raw_y2, raw_y3, raw_y4]
        # sanity check: coordinates far beyond the image usually mean the tensor was mis-parsed
        if x3 > 1000 or x4 > 1000 or x1 > 1000 or x2 > 1000 or y3 > 1000 or y4 > 1000 or y1 > 1000 or y2 > 1000:
            zh1 += 1
            kk = struct.unpack('!f', int(bin(infer_data[20*i+17])[2:], 2).to_bytes(4, byteorder='big'))[0]
            print("Data parse error: " + tensorFile + " " + str(x3) + '--' + str(zh1))
            break
        point_all.append(point_dict1)

    end = len(point_all)
    # Duplicate suppression: if the entrance corners (points 0 and 1) of two detections
    # are within 40 px (L1 distance), mark the later detection as a duplicate (delrule = 1).
    for i in range(len(point_all)):
        if point_all[i]["delrule"] == 0:
            for j in range(i + 1, end):
                # simply compare the distance between the entrance corner points
                xi1 = point_all[i]['pointx'][0]
                yi1 = point_all[i]['pointy'][0]
                xi2 = point_all[i]['pointx'][1]
                yi2 = point_all[i]['pointy'][1]
                xj1 = point_all[j]['pointx'][0]
                yj1 = point_all[j]['pointy'][0]
                xj2 = point_all[j]['pointx'][1]
                yj2 = point_all[j]['pointy'][1]
                if (abs(xi1 - xj1) + abs(yi1 - yj1)) < 40 or (abs(xi2 - xj2) + abs(yi2 - yj2)) < 40:
                    point_all[j]["delrule"] = 1
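
    # Example of the 40 px rule above: first entrance corners at (100, 200) and (120, 215)
    # give |dx| + |dy| = 20 + 15 = 35 < 40, so the second detection is marked delrule = 1.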

    # Shape check: the two side edges (P0->P3 and P1->P2) of a valid slot should be
    # roughly parallel; if the angle between them exceeds 20 degrees, mark delrule = 2.
    for i in range(len(point_all)):
        if point_all[i]["delrule"] == 0:
            line1 = [point_all[i]['pointx'][0], point_all[i]['pointy'][0], point_all[i]['pointx'][3], point_all[i]['pointy'][3]]
            line2 = [point_all[i]['pointx'][1], point_all[i]['pointy'][1], point_all[i]['pointx'][2], point_all[i]['pointy'][2]]
            vec1 = [line1[2] - line1[0], line1[3] - line1[1]]
            vec2 = [line2[2] - line2[0], line2[3] - line2[1]]
            # dot product and magnitudes of the two side vectors
            dot_product = vec1[0] * vec2[0] + vec1[1] * vec2[1]
            m1 = math.sqrt(vec1[0]**2 + vec1[1]**2) + 0.000000000001
            m2 = math.sqrt(vec2[0]**2 + vec2[1]**2) + 0.000000000001
            val = dot_product / (m1 * m2)
            # clamp the cosine into [-1, 1] before acos to avoid domain errors
            if val > 1:
                val = 1
            if val < -1:
                val = -1
            radians = math.acos(val)
            du = math.degrees(radians)
            if du > 20:
                point_all[i]["delrule"] = 2
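
    # Worked example of the side-angle check: with vec1 = (0, 100) and vec2 = (20, 100)
    # the angle is about 11.3 degrees, so the slot is kept; with vec2 = (50, 100) it is
    # about 26.6 degrees, so delrule is set to 2.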

    if 1:  # draw the results
        for point_i in point_all:
            if point_i["delrule"] == 0:
                if point_i["conf"] > 0.45:  # 0.45
                    print(point_i["conf"])
                    cv2.putText(im0, f'{point_i["conf"]:.3f}',
                                (point_i["pointx"][0] + 6, point_i["pointy"][0] + 6),
                                cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # confidence
                    cv2.putText(im0, point_i["name"],
                                (point_i["pointx"][0] + 6, point_i["pointy"][0] + 30),
                                cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # class
                    if float(point_i["isOccupied"]) > 0.1:  # 0.5
                        cv2.putText(im0, "Occ :" + f'{point_i["isOccupied"]:.3f}',
                                    (point_i["pointx"][0] + 6, point_i["pointy"][0] + 54),
                                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # occupied
                    if float(point_i["isVIP"]) > 0.5:
                        cv2.putText(im0, "VIP :" + f'{point_i["isVIP"]:.3f}',
                                    (point_i["pointx"][0] + 6, point_i["pointy"][0] + 78),
                                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # VIP slot
                    if float(point_i["iswoman"]) > 0.5:
                        cv2.putText(im0, "woman :" + f'{point_i["iswoman"]:.3f}',
                                    (point_i["pointx"][0] + 6, point_i["pointy"][0] + 102),
                                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # women-only slot
                    if float(point_i["isdisabled"]) > 0.5:
                        cv2.putText(im0, "disab :" + f'{point_i["isdisabled"]:.3f}',
                                    (point_i["pointx"][0] + 6, point_i["pointy"][0] + 126),
                                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # disabled slot
                    if float(point_i["ischarging"]) > 0.5:
                        cv2.putText(im0, "charg :" + f'{point_i["ischarging"]:.3f}',
                                    (point_i["pointx"][0] + 6, point_i["pointy"][0] + 150),
                                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # charging slot
                    if float(point_i["step"]) > 0.5:
                        cv2.putText(im0, "step :" + f'{point_i["step"]:.3f}',
                                    (point_i["pointx"][0] + 6, point_i["pointy"][0] + 174),
                                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)  # stepped slot
                    # kept slot: entrance edge in green, remaining edges in cyan
                    cv2.arrowedLine(im0, (point_i["pointx"][0], point_i["pointy"][0]), (point_i["pointx"][1], point_i["pointy"][1]), (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.arrowedLine(im0, (point_i["pointx"][1], point_i["pointy"][1]), (point_i["pointx"][2], point_i["pointy"][2]), (255, 255, 0), 1, cv2.LINE_AA)
                    cv2.arrowedLine(im0, (point_i["pointx"][2], point_i["pointy"][2]), (point_i["pointx"][3], point_i["pointy"][3]), (255, 255, 0), 1, cv2.LINE_AA)
                    cv2.arrowedLine(im0, (point_i["pointx"][3], point_i["pointy"][3]), (point_i["pointx"][0], point_i["pointy"][0]), (255, 255, 0), 1, cv2.LINE_AA)
                else:
                    # low-confidence slot: drawn in red
                    cv2.putText(im0, f'{point_i["conf"]:.3f}',
                                (point_i["pointx"][0] + 6, point_i["pointy"][0] + 6),
                                cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)
                    cv2.arrowedLine(im0, (point_i["pointx"][0], point_i["pointy"][0]), (point_i["pointx"][1], point_i["pointy"][1]), (0, 0, 255), 1, cv2.LINE_AA)
                    cv2.arrowedLine(im0, (point_i["pointx"][1], point_i["pointy"][1]), (point_i["pointx"][2], point_i["pointy"][2]), (0, 0, 255), 1, cv2.LINE_AA)
                    cv2.arrowedLine(im0, (point_i["pointx"][2], point_i["pointy"][2]), (point_i["pointx"][3], point_i["pointy"][3]), (0, 0, 255), 1, cv2.LINE_AA)
                    cv2.arrowedLine(im0, (point_i["pointx"][3], point_i["pointy"][3]), (point_i["pointx"][0], point_i["pointy"][0]), (0, 0, 255), 1, cv2.LINE_AA)
            if point_i["delrule"] == 1:
                # suppressed duplicate: red confidence text, black outline
                cv2.putText(im0, f'{point_i["conf"]:.3f}',
                            (point_i["pointx"][0] + 6, point_i["pointy"][0] + 6),
                            cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)
                cv2.arrowedLine(im0, (point_i["pointx"][0], point_i["pointy"][0]), (point_i["pointx"][1], point_i["pointy"][1]), (0, 0, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im0, (point_i["pointx"][1], point_i["pointy"][1]), (point_i["pointx"][2], point_i["pointy"][2]), (0, 0, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im0, (point_i["pointx"][2], point_i["pointy"][2]), (point_i["pointx"][3], point_i["pointy"][3]), (0, 0, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im0, (point_i["pointx"][3], point_i["pointy"][3]), (point_i["pointx"][0], point_i["pointy"][0]), (0, 0, 0), 1, cv2.LINE_AA)
            if point_i["delrule"] == 2:
                # rejected by the side-angle check: red confidence text, black outline
                cv2.putText(im0, f'{point_i["conf"]:.3f}',
                            (point_i["pointx"][0] + 6, point_i["pointy"][0] + 6),
                            cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)
                cv2.arrowedLine(im0, (point_i["pointx"][0], point_i["pointy"][0]), (point_i["pointx"][1], point_i["pointy"][1]), (0, 0, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im0, (point_i["pointx"][1], point_i["pointy"][1]), (point_i["pointx"][2], point_i["pointy"][2]), (0, 0, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im0, (point_i["pointx"][2], point_i["pointy"][2]), (point_i["pointx"][3], point_i["pointy"][3]), (0, 0, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im0, (point_i["pointx"][3], point_i["pointy"][3]), (point_i["pointx"][0], point_i["pointy"][0]), (0, 0, 0), 1, cv2.LINE_AA)

    zh = "/ai/TopViewMul/4/psd_out/" + tensorFile.split('/')[-1][:-8] + '.bmp'
    print(zh)
    cv2.imwrite(zh, im0)

    if DEBUG_DRAW_MODEL_SIZE:
        # resize the annotated image to the model input size and redraw the kept slots
        # using the raw (model-space) coordinates
        im_model = cv2.resize(im0, (MODEL_IN_W, MODEL_IN_H))
        for point_i in point_all:
            if point_i.get("raw_pointx") is None:
                continue
            if point_i["delrule"] == 0 and point_i["conf"] > 0.45:
                cv2.arrowedLine(im_model, (point_i["raw_pointx"][0], point_i["raw_pointy"][0]), (point_i["raw_pointx"][1], point_i["raw_pointy"][1]), (0, 255, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im_model, (point_i["raw_pointx"][1], point_i["raw_pointy"][1]), (point_i["raw_pointx"][2], point_i["raw_pointy"][2]), (255, 255, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im_model, (point_i["raw_pointx"][2], point_i["raw_pointy"][2]), (point_i["raw_pointx"][3], point_i["raw_pointy"][3]), (255, 255, 0), 1, cv2.LINE_AA)
                cv2.arrowedLine(im_model, (point_i["raw_pointx"][3], point_i["raw_pointy"][3]), (point_i["raw_pointx"][0], point_i["raw_pointy"][0]), (255, 255, 0), 1, cv2.LINE_AA)
        zh_model = "/ai/TopViewMul/4/psd_out/" + tensorFile.split('/')[-1][:-8] + '_model.bmp'
        cv2.imwrite(zh_model, im_model)
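
# Note: readTensor() writes its results into the hard-coded directory below. Creating
# it up front is an addition to the original script (a small guard, assuming the path
# is correct for your setup), so cv2.imwrite does not fail silently when the
# directory is missing.
os.makedirs("/ai/TopViewMul/4/psd_out/", exist_ok=True)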


for binpath in tqdm(glob(os.path.join("/ai/DataSets/OD_FSD_zh/TI_test/rm/2/psd/", '*.bin'))):
    readTensor(binpath)

num = 0
for binpath in tqdm(glob(os.path.join("/ai/TopViewMul/4/out_480_544_2039/psd/", '*.bin'))):
    readTensor(binpath)
    num += 1
    if num == 100:
        print('exit')
        exit(0)
