• paddledet 训练旋转目标检测 ppyoloe-r 训练自己的数据集


    1.数据转换
    labelme2coco:将labelme标注的多边形points,通过OpenCV的minAreaRect求最小外接旋转矩形,再转为旋转框的4个角点坐标

    # encoding=utf-8
    import argparse
    import collections
    import datetime
    import glob
    import json
    import os
    import os.path as osp
    import sys
    import uuid
    import cv2
    import imgviz
    import numpy as np
    
    import labelme
    
    try:
        import pycocotools.mask
    except ImportError:
        print("Please install pycocotools:\n\n    pip install pycocotools\n")
        sys.exit(1)
    
    
    def main():
        """Convert a folder of labelme-annotated JSONs into one COCO annotations file.

        Reads every ``*.json`` in ``input_dir``, saves the image embedded in each
        labelme file to ``output_dir/JPEGImages`` and writes a single
        ``output_dir/annotations.json``. Free-form polygons are reduced to their
        minimum-area (rotated) bounding box so the output is suitable for training
        rotated detectors such as PP-YOLOE-R. Produces a single category
        ``'sack'`` with id 0.
        """
        input_dir = 'G:/customer/visionary_s_3d_dete/zhixi'
        output_dir = 'dataset/zhixi'
        # exist_ok=True: the original crashed with FileExistsError on every
        # re-run once the output directories were already created.
        os.makedirs(output_dir, exist_ok=True)
        os.makedirs(osp.join(output_dir, "JPEGImages"), exist_ok=True)

        data = dict(
            images=[
                # license, url, file_name, height, width, date_captured, id
            ],
            annotations=[
                # segmentation, area, iscrowd, image_id, bbox, category_id, id
            ],
            categories=[
                # supercategory, id, name
            ],
        )
        # Single-class dataset: every instance is labelled 'sack' (category_id 0).
        data["categories"].append(
            dict(supercategory=None, id=0, name='sack',)
        )

        out_ann_file = osp.join(output_dir, "annotations.json")
        label_files = glob.glob(osp.join(input_dir, "*.json"))
        for image_id, filename in enumerate(label_files):
            print("Generating dataset from:", filename)

            label_file = labelme.LabelFile(filename=filename)

            base = osp.splitext(osp.basename(filename))[0]
            out_img_file = osp.join(output_dir, "JPEGImages", base + ".jpg")

            # Decode the image embedded in the labelme JSON and save it as JPEG.
            img = labelme.utils.img_data_to_arr(label_file.imageData)
            imgviz.io.imsave(out_img_file, img)
            data["images"].append(
                dict(
                    license=0,
                    url=None,
                    file_name=osp.relpath(out_img_file, osp.dirname(out_ann_file)),
                    height=img.shape[0],
                    width=img.shape[1],
                    date_captured=None,
                    id=image_id,
                )
            )

            masks = {}  # per-instance binary mask, used for COCO area/bbox
            segmentations = collections.defaultdict(list)  # per-instance polygons
            for shape in label_file.shapes:
                points = shape["points"]
                label = shape["label"]
                group_id = shape.get("group_id")
                shape_type = shape.get("shape_type")
                mask = labelme.utils.shape_to_mask(
                    img.shape[:2], points, shape_type
                )

                # Shapes without an explicit group id are independent instances;
                # a fresh uuid guarantees a unique instance key.
                if group_id is None:
                    group_id = uuid.uuid1()

                instance = (label, group_id)

                # Shapes sharing (label, group_id) belong to one instance:
                # OR their masks together.
                if instance in masks:
                    masks[instance] = masks[instance] | mask
                else:
                    masks[instance] = mask

                # shape_type values are mutually exclusive, so a single
                # if/elif chain is equivalent to the original separate ifs.
                if shape_type == "rectangle":
                    (x1, y1), (x2, y2) = points
                    x1, x2 = sorted([x1, x2])
                    y1, y2 = sorted([y1, y2])
                    points = [x1, y1, x2, y1, x2, y2, x1, y2]
                elif shape_type == "circle":
                    (x1, y1), (x2, y2) = points
                    r = np.linalg.norm([x2 - x1, y2 - y1])
                    # r(1-cos(a/2)) N>pi/arccos(1-x/r)
                    # x: tolerance of the gap between the arc and the line segment.
                    # Clamp the arccos argument to [-1, 1]: for r < 0.5 the raw
                    # value 1 - 1/r drops below -1, arccos returns NaN and
                    # int(NaN) raised ValueError in the original.
                    n_points_circle = max(
                        int(np.pi / np.arccos(max(1 - 1 / r, -1.0))), 12
                    )
                    i = np.arange(n_points_circle)
                    x = x1 + r * np.sin(2 * np.pi / n_points_circle * i)
                    y = y1 + r * np.cos(2 * np.pi / n_points_circle * i)
                    points = np.stack((x, y), axis=1).flatten().tolist()
                elif shape_type == "polygon":
                    # Minimum-area rotated rectangle: reduces the free-form
                    # polygon to the 4-corner rotated box PP-YOLOE-R expects.
                    points = np.float32(points)
                    rect = cv2.minAreaRect(points)
                    points = cv2.boxPoints(rect).flatten().tolist()

                segmentations[instance].append(points)
            segmentations = dict(segmentations)

            for instance, mask in masks.items():
                # RLE-encode the merged mask to obtain COCO area and the
                # axis-aligned bbox (pycocotools works on Fortran-order uint8).
                mask = np.asfortranarray(mask.astype(np.uint8))
                mask = pycocotools.mask.encode(mask)
                area = float(pycocotools.mask.area(mask))
                bbox = pycocotools.mask.toBbox(mask).flatten().tolist()

                data["annotations"].append(
                    dict(
                        id=len(data["annotations"]),
                        image_id=image_id,
                        category_id=0,  # single 'sack' class
                        segmentation=segmentations[instance],
                        area=area,
                        bbox=bbox,
                        iscrowd=0,
                    )
                )
        with open(out_ann_file, "w") as f:
            json.dump(data, f)
    
    
    if __name__ == "__main__":
        main()
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90
    • 91
    • 92
    • 93
    • 94
    • 95
    • 96
    • 97
    • 98
    • 99
    • 100
    • 101
    • 102
    • 103
    • 104
    • 105
    • 106
    • 107
    • 108
    • 109
    • 110
    • 111
    • 112
    • 113
    • 114
    • 115
    • 116
    • 117
    • 118
    • 119
    • 120
    • 121
    • 122
    • 123
    • 124
    • 125
    • 126
    • 127
    • 128
    • 129
    • 130
    • 131
    • 132
    • 133
    • 134
    • 135
    • 136
    • 137

    2.修改data.yml文件
    这个路径要根据自己实际修改,实在不行就debug看看哪里出问题

    # Rotated-box (RBOX) evaluation for PP-YOLOE-R.
    metric: RBOX
    # The conversion script above emits exactly one category ('sack', id 0),
    # so num_classes must be 1 — the DOTA default of 15 would mismatch the
    # dataset and break training. Adjust if your own dataset has more classes.
    num_classes: 1
    
    TrainDataset:
      !COCODataSet
        image_dir:
        anno_path: annotations/annotations.json
        dataset_dir: ../dataset/zhixi
        data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_poly']
    
    EvalDataset:
      !COCODataSet
        image_dir:
        anno_path: annotations/annotations.json
        dataset_dir: ../dataset/zhixi
        data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_poly']
    
    TestDataset:
      !ImageFolder
        anno_path: annotations/annotations.json
        dataset_dir: ../dataset/zhixi
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22

    3.训练报错
    RuntimeError: (PreconditionNotMet) The third-party dynamic library (cublas64_102.dll;cublas64_10.dll) that Paddle depends on is not configured correctly. (error code is 126)

    解决:路径为C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin

    在bin路径下将cublas64_100.dll重命名为cublas64_10.dll

    在bin路径下将cusolver64_100重命名为cusolver64_10
    ————————————————
    版权声明:本文为CSDN博主「李伯爵的指间沙」的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。
    原文链接:https://blog.csdn.net/m0_37690102/article/details/123474171

  • 相关阅读:
    机器学习课程复习——隐马尔可夫
    CyberDAO:web3时代的引领者
    k8s组件和网络插件挂掉,演示已有的pod是否正常运行
    0×01 Vulnhub靶机渗透总结之 Kioptrix: Level 1 (#1) 古老的Apache Samba VULN
    maven编译,本地jar存在确报找不到
    Linux_用户组管理
    【无标题】
    Linux——Bash脚本基本用法总结
    浅学JAVA泛型一:泛型的基础知识
    [YOLOV7] Win10+Anaconda+Pytorch 部署YOLOv7(含踩坑解决方案)
  • 原文地址:https://blog.csdn.net/qq_33228039/article/details/128112365