【YOLO】Commonly Used Scripts


Converting VOC Annotations to YOLO Format

python
import os
import xml.etree.ElementTree as ET


def convert(size, box):
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)
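
# A quick sanity check of convert() with made-up numbers (not from the original script):
# for a 100x200 (width x height) image and a box with xmin=10, xmax=50, ymin=20, ymax=80,
# convert((100, 200), (10, 50, 20, 80)) returns (0.3, 0.25, 0.4, 0.3),
# i.e. the normalized (x_center, y_center, width, height) that YOLO expects.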


def convert_annotation(xml_file, output_dir, labels):
    # Parse the XML annotation file
    tree = ET.parse(xml_file)
    root = tree.getroot()

    # Read the image width and height
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)

    # Accumulate the YOLO-format label lines
    result_str = ""

    # Iterate over all annotated objects
    for obj in root.iter('object'):
        # Skip objects marked as difficult
        difficult = obj.find('difficult')
        if difficult is not None:
            difficult = difficult.text
            if int(difficult) == 1:
                continue
        cls = obj.find('name').text
        if cls not in labels:
            continue
        cls_id = labels.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
             float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        # One label line per object: "<class_id> <x_center> <y_center> <width> <height>"
        result_str += f"{cls_id} " + " ".join(str(a) for a in bb) + "\n"

    # Write the YOLO-format label file
    file_name = os.path.splitext(os.path.basename(xml_file))[0]
    with open(os.path.join(output_dir, file_name + ".txt"), "w") as f:
        f.write(result_str.strip())


def main(voc_dir, output_dir, labels):
    # Make sure the output directory exists
    os.makedirs(output_dir, exist_ok=True)
    # Walk through the Annotations folder and convert every XML file
    annotations_dir = os.path.join(voc_dir, "Annotations")
    for xml_file in os.listdir(annotations_dir):
        if xml_file.endswith(".xml"):
            xml_path = os.path.join(annotations_dir, xml_file)
            convert_annotation(xml_path, output_dir, labels)


if __name__ == "__main__":
    # VOC dataset root containing Annotations, JPEGImages, etc.
    voc_dir = "path_to_your_voc_dataset"
    # Directory for the converted YOLO-format label files
    output_dir = "path_to_your_yolo_annotations"
    # Class names contained in the dataset
    labels = ['nodule']
    main(voc_dir, output_dir, labels)
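
Each output .txt file contains one line per object in the form class_id x_center y_center width height, with all coordinates normalized to [0, 1]. As a quick sanity check (a minimal sketch, not part of the script above; the image size and label line below are placeholder values), a label line can be converted back to pixel corner coordinates:

python
def yolo_to_corners(line, img_w, img_h):
    # Parse "class_id x_center y_center width height" back into pixel corners
    cls_id, x, y, w, h = line.split()
    x, y = float(x) * img_w, float(y) * img_h
    w, h = float(w) * img_w, float(h) * img_h
    return int(cls_id), x - w / 2, y - h / 2, x + w / 2, y + h / 2


# Placeholder example: a 512x512 image and one label line
print(yolo_to_corners("0 0.5 0.5 0.25 0.25", 512, 512))
# -> (0, 192.0, 192.0, 320.0, 320.0)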

Splitting into Training, Validation, and Test Sets

python
import os
import random
from shutil import copyfile


def split_dataset(image_folder, txt_folder, output_folder, split_ratio=(0.8, 0.1, 0.1)):
    # Ensure the output folders exist
    for dataset in ['train', 'val', 'test']:
        os.makedirs(os.path.join(output_folder, dataset, 'images'), exist_ok=True)
        os.makedirs(os.path.join(output_folder, dataset, 'labels'), exist_ok=True)

    # Get list of image files
    image_files = [f for f in os.listdir(image_folder) if f.endswith(('.jpg', '.jpeg', '.png'))]
    random.shuffle(image_files)

    num_images = len(image_files)
    num_train = int(split_ratio[0] * num_images)
    num_val = int(split_ratio[1] * num_images)

    train_images = image_files[:num_train]
    val_images = image_files[num_train:num_train + num_val]
    test_images = image_files[num_train + num_val:]

    # Copy images to respective folders
    for dataset, images_list in zip(['train', 'val', 'test'], [train_images, val_images, test_images]):
        for image_file in images_list:
            image_path = os.path.join(image_folder, image_file)
            copyfile(image_path, os.path.join(output_folder, dataset, 'images', image_file))
            txt_file = os.path.splitext(image_file)[0] + '.txt'
            txt_path = os.path.join(txt_folder, txt_file)

            # Copy corresponding txt file if exists
            if os.path.exists(txt_path):
                copyfile(txt_path, os.path.join(output_folder, dataset, 'labels', txt_file))


if __name__ == "__main__":
    # Folder containing the images
    image_folder_path = "./JPEGImages"
    # Folder containing the YOLO label files
    txt_folder_path = "./Labels"
    # Output folder for the split dataset
    output_dataset_path = "./dataset"

    split_dataset(image_folder_path, txt_folder_path, output_dataset_path)
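
After the split, each of train/, val/ and test/ under ./dataset contains an images/ and a labels/ subfolder. If you train with Ultralytics-style YOLO tooling, these folders are typically referenced from a small dataset YAML; the snippet below is a minimal sketch under that assumption (the file name data.yaml and the single 'nodule' class follow the example above and should be adjusted to your setup):

python
# Write a minimal Ultralytics-style dataset config pointing at the split folders.
# Assumes the layout produced by split_dataset() and a single 'nodule' class.
yaml_content = """\
path: ./dataset
train: train/images
val: val/images
test: test/images
names:
  0: nodule
"""

with open("./dataset/data.yaml", "w") as f:
    f.write(yaml_content)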