YOLOv8 Region-Based Multi-Class Counting

  • [1. Basics](#1. Basics)
  • [2. Counting Function](#2. Counting Function)
    • [2.1 Counting Module](#2.1 Counting Module)
    • [2.2 Region Test Module](#2.2 Region Test Module)
  • [3. Initial Code](#3. Initial Code)
  • [4. Experimental Results](#4. Experimental Results)
  • [5. Complete Code](#5. Complete Code)
  • [6. Source Code](#6. Source Code)

1. Basics

This project is implemented on top of the setup described in Windows+YOLOV8环境配置 (Windows + YOLOv8 environment configuration); refer to that article for the environment setup.

2. Counting Function

2.1 Counting Module

This module counts objects inside a specified region.
region_points defines the region and can be set to a polygon of any shape.

python
region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
count_class1 = 0
count_class2 = 0
for i, box in enumerate(boxes):
    x1, y1, x2, y2 = box[:4]
    x_center = (x1 + x2) / 2
    y_center = (y1 + y2) / 2
    center_point = (int(x_center), int(y_center))
    if is_inside_region(center_point, region_points):
        if box[-1] == 0:    # class 0: person
            count_class1 += 1
        elif box[-1] == 2:  # class 2: car
            count_class2 += 1
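
The two counters above are hardcoded to person (class 0) and car (class 2). If you want a count for every class that appears inside the region, a dictionary keyed by class name is a straightforward generalization. The following is only a minimal sketch under the same assumptions as above (boxes, region_points, is_inside_region and a loaded model are already defined); model.names is the id-to-label mapping exposed by the Ultralytics model.

python
from collections import defaultdict

region_counts = defaultdict(int)  # class name -> number of objects inside the region
for box in boxes:
    x1, y1, x2, y2 = box[:4]
    center_point = (int((x1 + x2) / 2), int((y1 + y2) / 2))
    if is_inside_region(center_point, region_points):
        cls_id = int(box[-1])                    # class id is the last column of boxes.data
        region_counts[model.names[cls_id]] += 1
print(dict(region_counts))  # e.g. {'person': 3, 'car': 2}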

2.2 Region Test Module

python
def is_inside_region(point, region_points):
    """
    判断点是否在指定区域内
    """
    return cv2.pointPolygonTest(np.array(region_points), point, False) >= 0
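
cv2.pointPolygonTest returns a positive value when the point lies inside the polygon, 0 when it lies on an edge, and a negative value outside (passing False as the last argument requests this sign-only result instead of the signed distance), so the >= 0 check treats boundary points as inside. A quick sanity check, assuming the same region_points as above:

python
region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
print(is_inside_region((100, 100), region_points))   # True, the point is inside the rectangle
print(is_inside_region((1500, 100), region_points))  # False, the point is outside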

3. Initial Code

python
import cv2
import numpy as np
from ultralytics import YOLO


def is_inside_region(point, region_points):
    """
    判断点是否在指定区域内
    """
    return cv2.pointPolygonTest(np.array(region_points), point, False) >= 0

def detect():

    model = YOLO("yolov8n.pt")
    cv2.namedWindow('region counter', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('region counter', 1280, 360)  # set the display window size
    cap = cv2.VideoCapture('ultralytics/assets/a3.mp4')
    assert cap.isOpened(), "Error reading video file"
    # the writer frame size must match the frames that are written
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    out_video = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))

    region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
    region_points_np = np.array(region_points, dtype=np.int32)
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            print("Video frame is empty or video processing has been successfully completed.")
            break
        tracks = model.track(im0, persist=True)
        annotated_frame = tracks[0].plot()
        boxes = tracks[0].boxes.data
        count_class1 = 0
        count_class2 = 0
        for i, box in enumerate(boxes):
            x1, y1, x2, y2 = box[:4]
            x_center = (x1 + x2) / 2
            y_center = (y1 + y2) / 2
            center_point = (int(x_center), int(y_center))
            if is_inside_region(center_point, region_points):
                if box[-1] == 0:    # class 0: person
                    count_class1 += 1
                elif box[-1] == 2:  # class 2: car
                    count_class2 += 1
        cv2.polylines(annotated_frame, [region_points_np], isClosed=True, color=(255, 0, 0), thickness=2)
        print("Number of objects in class 1:", count_class1)
        print("Number of objects in class 2:", count_class2)

        cv2.imshow("region counter", annotated_frame)
        out_video.write(annotated_frame)
        if cv2.waitKey(1) == ord('q'):
            break
    out_video.release()
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    detect()

4. Experimental Results

This experiment can count multiple object classes at the same time.

(1) Region counting experiment

(2) To render the region as a filled, translucent overlay instead of an outline, replace the following line in the code above

python
cv2.polylines(annotated_frame, [region_points_np], isClosed=True, color=(255, 0, 0), thickness=2)

with the following:

python
region_mask = np.zeros_like(annotated_frame)
cv2.fillPoly(region_mask, [region_points_np], color=(255, 0, 0))
# blend the filled region into the original frame with transparency
alpha = 0.2  # adjust the transparency
annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0)
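
Note that cv2.addWeighted blends the entire frame, so pixels outside the region (where region_mask is black) are also darkened by a factor of 1 - alpha. If you only want the inside of the region tinted, one option is to blend as above and then copy the blended pixels back only inside the polygon. A minimal sketch, assuming the same annotated_frame, region_points_np and alpha as above:

python
blended = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0)
inside = np.zeros(annotated_frame.shape[:2], dtype=np.uint8)
cv2.fillPoly(inside, [region_points_np], 255)             # binary mask of the region
annotated_frame[inside == 255] = blended[inside == 255]   # keep original pixels elsewhere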

(3) Displaying the counts on the frame

After the line annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0), add:

python
text1 = "count_class1:%d" % count_class1
text2 = "count_class2:%d" % count_class2
cv2.putText(annotated_frame, text1, (20, 30), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
cv2.putText(annotated_frame, text2, (20, 60), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
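
Since these putText calls come after the addWeighted blend, the counts are drawn at full opacity on top of the translucent region overlay; the complete code below uses the same order.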

5. Complete Code

python
import cv2
import numpy as np
from ultralytics import YOLO


def is_inside_region(point, region_points):
    """
    判断点是否在指定区域内
    """
    return cv2.pointPolygonTest(np.array(region_points), point, False) >= 0

def detect():

    model = YOLO("yolov8n.pt")
    cv2.namedWindow('region counter', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('region counter', 1280, 360)  # set the display window size
    cap = cv2.VideoCapture('ultralytics/assets/a3.mp4')
    assert cap.isOpened(), "Error reading video file"
    # the writer frame size must match the frames that are written
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    out_video = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))

    region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
    region_points_np = np.array(region_points, dtype=np.int32)
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            print("Video frame is empty or video processing has been successfully completed.")
            break
        tracks = model.track(im0, persist=True)
        annotated_frame = tracks[0].plot()
        boxes = tracks[0].boxes.data
        count_class1 = 0
        count_class2 = 0
        for i, box in enumerate(boxes):
            x1, y1, x2, y2 = box[:4]
            x_center = (x1 + x2) / 2
            y_center = (y1 + y2) / 2
            center_point = (int(x_center), int(y_center))
            if is_inside_region(center_point, region_points):
                if box[-1] == 0:    # class 0: person
                    count_class1 += 1
                elif box[-1] == 2:  # class 2: car
                    count_class2 += 1
        # cv2.polylines(annotated_frame, [region_points_np], isClosed=True, color=(255, 0, 0), thickness=2)
        region_mask = np.zeros_like(annotated_frame)
        cv2.fillPoly(region_mask, [region_points_np], color=(255, 0, 0))
        # blend the filled region into the original frame with transparency
        alpha = 0.2  # adjust the transparency
        annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0)

        text1 = "count_class1:%d" % count_class1
        text2 = "count_class2:%d" % count_class2
        cv2.putText(annotated_frame, text1, (20, 30), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
        cv2.putText(annotated_frame, text2, (20, 60), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
        print("Number of objects in class 1:", count_class1)
        print("Number of objects in class 2:", count_class2)

        cv2.imshow("region counter", annotated_frame)
        out_video.write(annotated_frame)
        if cv2.waitKey(1) == ord('q'):
            break
    out_video.release()
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    detect()

6. Source Code

You can download the source code from the Windows+YOLOV8环境配置 article, then paste the main code above into it and run it.
