Plotting a PR Curve [Easy to Understand]

1. Obtain the txt file

Run the darknet detector valid <data> <cfg> <weights> command from the official darknet code (for example:
darknet.exe detector valid data/koujian/koujian.data cfg/yolov3-tiny11.cfg backup/yolov3-tiny11_last.weights). This writes the network's detection output to txt files under the result/ directory, containing the detected image name, class, confidence, and bounding-box position (top-left and bottom-right corners).
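For reference, the evaluation code below reads each line of such a file (one file per class, e.g. comp4_det_test_koujian.txt) as the image id, the confidence, and the box corners xmin ymin xmax ymax. The two lines below are made-up examples of that layout, not real results:

000012 0.9231 45.2 88.6 210.4 312.9
000047 0.4127 12.0 30.5 98.7 150.2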

2. Create two new files:

(1) reval_voc_py3.py

#!/usr/bin/env python

import os, sys, argparse
import numpy as np
import _pickle as cPickle
from voc_eval_py3 import voc_eval
import matplotlib.pyplot as plt


def do_python_eval(label_path, valid_file, classes, output_dir='results'):
    cachedir = os.path.join('./', 'annotations_cache')
    aps = []
    use_07_metric = False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))

    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        rec, prec, ap = voc_eval(
            label_path,
            valid_file, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(np.mean(aps)))

    # plot the PR curve of the last evaluated class from the pkl just written
    fr = open(os.path.join(output_dir, cls + '_pr.pkl'), 'rb')
    inf = cPickle.load(fr)
    fr.close()

    x = inf['rec']
    y = inf['prec']
    fig = plt.figure(1, dpi=160)
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(x, y, label='PR')
    #ax.plot(result['Avg Recall'].values, label='Avg Recall')
    #plt.grid()
    ax.legend(loc='upper right')
    ax.set_ylim([0.6, 1.04])
    ax.set_xlim([0.0, 1.05])
    ax.set_title('PR curves')
    ax.set_xlabel('recall')
    ax.set_ylabel('precision')
    ax.spines['top'].set_visible(False)    # hide the top border
    ax.spines['right'].set_visible(False)  # hide the right border
    fig.savefig('PR')
    plt.savefig("PR.svg", format="svg")

    # Alternative pyplot-only version:
    # x = inf['rec']; y = inf['prec']
    # plt.figure(); plt.xlabel('recall'); plt.ylabel('precision')
    # plt.title('PR curve'); plt.ylim([0.6, 1.05])
    # plt.plot(x, y, label='PR'); plt.legend(loc='upper right'); plt.show()
    # print('AP:', inf['ap'])


if __name__ == '__main__':
    label_path = r'D:\darknet\darknet-master\build\darknet\x64\data\koujian\valid'  # label folder (location of the validation set)
    valid_file = r'comp4_det_test_koujian.txt'  # txt file generated by the valid command, found under result/
    name_path = r'D:\darknet\darknet-master\build\darknet\x64\data\koujian\koujian.names'  # names file
    output_dir = os.path.abspath('./')  # where the generated pkl files are saved
    with open(name_path, 'r') as f:
        lines = f.readlines()
    classes = [t.strip('\n') for t in lines]
    print('Evaluating detections')
    do_python_eval(label_path, valid_file, classes, output_dir)

The following need to be changed to match your own setup:
(1). label_path # the label folder; the annotation txt files and images should sit in the same directory (the expected label format is sketched after this list)
(2). valid_file # the txt file generated by the valid command, found under the result/ directory
(3). name_path # the names file
(4). output_dir # where the generated pkl files are saved
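parse_rec in voc_eval_py3.py below assumes the label folder holds standard YOLO annotation txt files, one per image, where every line is the class index followed by the normalized box center x, center y, width, and height. A made-up example line for a single-class dataset:

0 0.512 0.430 0.210 0.180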

(2) voc_eval_py3.py

import xml.etree.ElementTree as ET
import os
import _pickle as cPickle
import numpy as np
import cv2


def parse_rec(label_path, label_name):
    """Parse one YOLO-format label txt into a list of VOC-style object dicts."""
    objects = []
    label_file = os.path.join(label_path, label_name + '.txt')
    img_file = os.path.join(label_path, label_name + '.jpg')
    height, width, _ = cv2.imread(img_file).shape
    with open(label_file) as f:
        for line in f.readlines():
            obj_struct = {}
            obj_struct['name'] = 'koujian'  # change this to your own class name
            obj_struct['difficult'] = int(0)
            center_x, center_y, width_b, height_b = [float(x) for x in line.split()[1:]]
            # convert normalized (cx, cy, w, h) to absolute (xmin, ymin, xmax, ymax)
            obj_struct['bbox'] = [int(center_x * width - width * width_b / 2.0),
                                  int(center_y * height - height * height_b / 2.0),
                                  int(center_x * width + width * width_b / 2.0),
                                  int(center_y * height + height * height_b / 2.0)]
            objects.append(obj_struct)
    return objects


def voc_ap(rec, prec, use_07_metric=False):
    if use_07_metric:
        # 11-point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap


def voc_eval(label_path,
             detpath,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')

    label_file = []
    for f in os.listdir(label_path):
        file, tmp = f.split('.')
        if tmp == 'txt':
            label_file.append(file)

    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for label_name in label_file:
            recs[label_name] = parse_rec(label_path, label_name)
        with open(cachefile, 'wb') as f:
            cPickle.dump(recs, f)
    else:
        # load
        print('!!! cachefile = ', cachefile)
        with open(cachefile, 'rb') as f:
            recs = cPickle.load(f)

    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for label_name in label_file:
        R = [obj for obj in recs[label_name] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        npos = npos + sum(~difficult)
        class_recs[label_name] = {'bbox': bbox,
                                  'difficult': difficult,
                                  'det': det}

    # read dets
    detfile = detpath
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # sort by confidence
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]

    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)

        if BBGT.size > 0:
            # compute overlaps
            # intersection
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)

            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)

        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap
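As a quick sanity check of voc_ap, the function can be called directly on toy arrays. Here is a minimal sketch with made-up recall/precision values (not real detection results); with use_07_metric=False it integrates the area under the interpolated PR curve:

import numpy as np
from voc_eval_py3 import voc_ap

rec = np.array([0.5, 0.5, 1.0, 1.0])    # toy values for illustration only
prec = np.array([1.0, 0.5, 0.667, 0.5])
print(voc_ap(rec, prec, use_07_metric=False))  # roughly 0.83 for these toy values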

3. Run

Put the txt file generated by the valid command above and the two py files in the same folder, then run reval_voc_py3.py from the terminal. A pkl file named after the class you detected will appear in that folder, and if everything goes well a PR curve figure is produced directly as well.
Of course, you can also take the pkl file generated above and create a separate PR_draw.py:

import _pickle as cPickle
import matplotlib.pyplot as plt
fr = open('koujian_pr.pkl','rb') # change the first argument here to the pkl file you generated
#fr1= open('quexian_pr.pkl','rb')
inf = cPickle.load(fr)

fr.close()


x=inf['rec']
y=inf['prec']

plt.figure()
plt.xlabel('recall')
plt.ylabel('precision')
plt.title('PR curve')
plt.plot(x,y, label='PR')
plt.legend(loc='upper right')
plt.show()

print('AP:',inf['ap'])

Running this file produces the same PR curve figure.
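If you evaluated more than one class (the commented-out fr1 line above hints at a second pkl), the curves can also be overlaid on one figure. A minimal sketch, assuming files such as koujian_pr.pkl and quexian_pr.pkl exist in the current folder:

import _pickle as cPickle
import matplotlib.pyplot as plt

# load each class's pkl and plot its curve (file names are examples; use your own)
for pkl_name in ['koujian_pr.pkl', 'quexian_pr.pkl']:
    with open(pkl_name, 'rb') as fr:
        inf = cPickle.load(fr)
    plt.plot(inf['rec'], inf['prec'], label=pkl_name.replace('_pr.pkl', ''))

plt.xlabel('recall')
plt.ylabel('precision')
plt.title('PR curves')
plt.legend(loc='upper right')
plt.show()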

