1 Training with the darknet source code
Link: https://blog.csdn.net/weixin_45702256/article/details/111677711
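For reference, training itself is started with darknet's detector train command. The .data and .cfg file names below are assumptions that match the files used in step 3, and yolov4.conv.137 is the usual pretrained backbone weights file:
./darknet detector train cfg/my_data.data cfg/my_yolov4.cfg yolov4.conv.137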
2 Convert the dataset used for training into JSON format
My dataset is a self-made VOC-style dataset, so it needs some preprocessing first.
1 Split the dataset into train, val, and test
Based on the train/val/test txt files under ./ImageSets/Main, split the dataset into three folders: test, train, and val. The code is as follows:
# Each of the txt files (train/val/test) stores image names, one per line.
# This script copies the xml annotations of those images into a separate folder.
# Two paths need to be edited; create the target folder yourself first.
import os

f3 = open("/media/hkd/f22a2591-f8d6-4a4e-b02c-1ed94ed27262/dell/wuyahui/yolov4/darknet-master/build/darknet/x64/myData/ImageSets/Main/val.txt", 'r')  # path to the split's txt file (train/val/test)
for line2 in f3.readlines():
    line3 = line2[:-1]  # strip the trailing newline, leaving the image name, e.g. 000000
    xmldir = '/media/hkd/f22a2591-f8d6-4a4e-b02c-1ed94ed27262/dell/wuyahui/yolov4/darknet-master/build/darknet/x64/myData/Annotations/'
    # absolute path to the folder holding all xml files
    savedir = '/media/hkd/f22a2591-f8d6-4a4e-b02c-1ed94ed27262/dell/wuyahui/yolov4/darknet-master/build/darknet/x64/myData/val/'
    # absolute path of the folder the xml files of this split are copied to
    xmllist = os.listdir(xmldir)
    for xml in xmllist:
        if '.xml' in xml:
            if line3 in xml:
                fo = open(savedir + '/' + '{}'.format(xml), 'w')
                print('{}'.format(xml))
                fi = open(xmldir + '/' + '{}'.format(xml), 'r')
                content = fi.readlines()
                for line in content:
                    fo.write(line)
                fi.close()
                fo.close()
print('copy finished')
f3.close()
2 Generate the JSON files from the train/val/test folders
#coding:utf-8
import sys
import os
import json
import xml.etree.ElementTree as ET

START_BOUNDING_BOX_ID = 1
# The dict below stores the classes that are actually detected; edit it to match your own data.
# Here the three classes of my dataset are used as an example; the full VOC dataset would have 20 classes.
# The class names must be identical to the annotation names in the xml files.
PRE_DEFINE_CATEGORIES = {"person": 1, "chair": 2, "sofa": 3}
# Change the names and ids to match your own dataset.
def get(root, name):
    vars = root.findall(name)
    return vars

def get_and_check(root, name, length):
    vars = root.findall(name)
    if len(vars) == 0:
        raise NotImplementedError('Can not find %s in %s.' % (name, root.tag))
    if length > 0 and len(vars) != length:
        raise NotImplementedError('The size of %s is supposed to be %d, but is %d.' % (name, length, len(vars)))
    if length == 1:
        vars = vars[0]
    return vars

def get_filename_as_int(filename):
    try:
        filename = os.path.splitext(filename)[0]
        return int(filename)
    except ValueError:
        raise NotImplementedError('Filename %s is supposed to be an integer.' % (filename))

def convert(xml_dir, json_file):
    xmlFiles = os.listdir(xml_dir)
    json_dict = {"images": [], "type": "instances", "annotations": [],
                 "categories": []}
    categories = PRE_DEFINE_CATEGORIES
    bnd_id = START_BOUNDING_BOX_ID
    num = 0
    for line in xmlFiles:
        # print("Processing %s" % (line))
        num += 1
        if num % 50 == 0:
            print("processing ", num, "; file ", line)
        xml_f = os.path.join(xml_dir, line)
        tree = ET.parse(xml_f)
        root = tree.getroot()
        # The filename must be a number
        filename = line[:-4]
        image_id = get_filename_as_int(filename)
        size = get_and_check(root, 'size', 1)
        width = int(get_and_check(size, 'width', 1).text)
        height = int(get_and_check(size, 'height', 1).text)
        # image = {'file_name': filename, 'height': height, 'width': width,
        #          'id': image_id}
        image = {'file_name': (filename + '.jpg'), 'height': height, 'width': width,
                 'id': image_id}
        json_dict['images'].append(image)
        # Currently we do not support segmentation
        # segmented = get_and_check(root, 'segmented', 1).text
        # assert segmented == '0'
        for obj in get(root, 'object'):
            category = get_and_check(obj, 'name', 1).text
            if category not in categories:
                new_id = len(categories) + 1  # category ids start at 1
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, 'bndbox', 1)
            xmin = int(get_and_check(bndbox, 'xmin', 1).text) - 1
            ymin = int(get_and_check(bndbox, 'ymin', 1).text) - 1
            xmax = int(get_and_check(bndbox, 'xmax', 1).text)
            ymax = int(get_and_check(bndbox, 'ymax', 1).text)
            assert(xmax > xmin)
            assert(ymax > ymin)
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': image_id,
                   'bbox': [xmin, ymin, o_width, o_height],
                   'category_id': category_id, 'id': bnd_id, 'ignore': 0,
                   'segmentation': []}
            json_dict['annotations'].append(ann)
            bnd_id = bnd_id + 1
    for cate, cid in categories.items():
        cat = {'supercategory': 'none', 'id': cid, 'name': cate}
        json_dict['categories'].append(cat)
    json_fp = open(json_file, 'w')
    json_str = json.dumps(json_dict)
    json_fp.write(json_str)
    json_fp.close()

if __name__ == '__main__':
    folder_list = ["train", "val", "test"]
    # Change base_dir to the local folder that actually holds the images and annotations.
    base_dir = "/media/hkd/f22a2591-f8d6-4a4e-b02c-1ed94ed27262/dell/wuyahui/yolov4/darknet-master/build/darknet/x64/myData/"
    # Edit this to your own path.
    for i in range(3):
        folderName = folder_list[i]
        xml_dir = base_dir + folderName
        json_dir = base_dir + folderName + "/instances_" + folderName + ".json"
        print("deal: ", folderName)
        print("xml dir: ", xml_dir)
        print("json file: ", json_dir)
        convert(xml_dir, json_dir)
This produces instances_train.json, instances_val.json, and instances_test.json.
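To quickly check that the generated files are valid COCO-style annotations, they can be loaded with pycocotools. This is just a small sanity check rather than part of the original pipeline; the relative path is an assumption and should point at one of the files generated above:
from pycocotools.coco import COCO

# Sanity check: load one generated annotation file and print a few counts.
coco = COCO('./instances_test.json')  # adjust to the path of the file generated above
print('images:', len(coco.getImgIds()))
print('annotations:', len(coco.getAnnIds()))
print('categories:', [c['name'] for c in coco.loadCats(coco.getCatIds())])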
3 Generate coco_results.json from the trained weights
./darknet detector valid cfg/my_data.data cfg/my_yolov4.cfg my_yolov4_best.weights
Note: before running this command, check the contents of my_data.data and add eval=coco at the end; otherwise darknet only writes per-class result files instead of coco_results.json.
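For reference, a my_data.data file with this option added looks roughly like the following sketch. The class count matches the three example classes from step 2; the txt and names paths are placeholders for this dataset, and only the eval=coco line is the addition the note above refers to:
classes = 3
train = build/darknet/x64/myData/myData_train.txt
valid = build/darknet/x64/myData/myData_test.txt
names = build/darknet/x64/myData/myData.names
backup = backup/
eval = coco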
4 Evaluation with the cocoAPI
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab, json

pylab.rcParams['figure.figsize'] = (10.0, 8.0)

def get_img_id(file_name):
    # Collect the unique image_ids that appear in the detection-results json.
    ls = []
    annos = json.load(open(file_name, 'r'))
    for anno in annos:
        ls.append(anno['image_id'])
    myset = {}.fromkeys(ls).keys()  # de-duplicate the ids
    return myset

if __name__ == '__main__':
    annType = ['segm', 'bbox', 'keypoints']  # set iouType to 'segm', 'bbox' or 'keypoints'
    annType = annType[1]  # specify type here
    cocoGt_file = './instances_test.json'
    cocoGt = COCO(cocoGt_file)  # load the ground-truth COCO object from the annotation json
    cocoDt_file = './coco_results.json'
    imgIds = get_img_id(cocoDt_file)
    print(len(imgIds))
    cocoDt = cocoGt.loadRes(cocoDt_file)  # load the detection results as a COCO object
    imgIds = sorted(imgIds)  # sort the image_ids of the annotated images
    imgIds = imgIds[0:60]  # images from the annotation set to evaluate
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = imgIds