coco_eval.py
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pycocotools.cocoeval import COCOeval

from tensorflow import keras
import numpy as np
import json

import progressbar
assert(callable(progressbar.progressbar)), "Using wrong progressbar module, install 'progressbar2' instead."


def evaluate_coco(generator, model, threshold=0.05):
""" Use the pycocotools to evaluate a COCO model on a dataset.
Args
generator : The generator for generating the evaluation data.
model : The model to evaluate.
threshold : The score threshold to use.
"""
    # start collecting results
    results = []
    image_ids = []
    for index in progressbar.progressbar(range(generator.size()), prefix='COCO evaluation: '):
        image = generator.load_image(index)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)
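
        # channels_first backends expect (C, H, W) input, so reorder the (H, W, C) image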
        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))

        # correct boxes for image scale
        boxes /= scale

        # change to (x, y, w, h) (MS COCO standard)
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]
        # compute predicted labels and scores
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted, so we can break
            if score < threshold:
                break

            # append detection for each positively labeled class
            image_result = {
                'image_id'    : generator.image_ids[index],
                'category_id' : generator.label_to_coco_label(label),
                'score'       : float(score),
                'bbox'        : box.tolist(),
            }

            # append detection to results
            results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.image_ids[index])
    if not len(results):
        return

    # write output
    json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
    json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
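

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). The generator and model
# construction below are assumptions: CocoGenerator and models.load_model come
# from the keras-retinanet package this file belongs to, the paths are
# placeholders, and the snapshot is assumed to already be an inference model
# that outputs (boxes, scores, labels).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from keras_retinanet import models
    from keras_retinanet.preprocessing.coco import CocoGenerator

    # load an inference snapshot and the COCO validation generator (assumed paths)
    model = models.load_model('snapshot.h5', backbone_name='resnet50')
    generator = CocoGenerator('/path/to/coco', 'val2017')

    # run the evaluation and print the COCO summary statistics (AP/AR array)
    stats = evaluate_coco(generator, model, threshold=0.05)
    print(stats)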