eval_aerial.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Evaluate binary building-segmentation predictions against ground-truth masks."""
import glob
import os

import cv2 as cv
import numpy as np
from sklearn.metrics import confusion_matrix
label_values = ['Unlabeled', 'building']
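
# metrics() below derives global accuracy, per-class F1 and Cohen's kappa from
# a single confusion matrix computed over the flattened label maps.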
def metrics(predictions, gts):
    """Compute metrics from label-encoded predictions and ground truths.

    Args:
        predictions (list of 2D arrays): predicted label maps
        gts (list of 2D arrays): ground-truth label maps (same dims)
    """
    prediction_labels = np.concatenate([p.flatten() for p in predictions])
    gt_labels = np.concatenate([g.flatten() for g in gts])
    cm = confusion_matrix(
        gt_labels,
        prediction_labels,
        labels=list(range(len(label_values))))
    print("Confusion matrix :")
    print(cm)
    print("---")
    # Global accuracy: correctly classified pixels / all pixels.
    accuracy = np.trace(cm)
    total = np.sum(cm)
    print("{} pixels processed".format(total))
    print("Total accuracy : {}%".format(accuracy * 100 / float(total)))
    print("---")
    # Per-class F1: 2*TP / (row sum + column sum of the confusion matrix).
    F1Score = np.zeros(len(label_values))
    for i in range(len(label_values)):
        denom = np.sum(cm[i, :]) + np.sum(cm[:, i])
        if denom > 0:
            F1Score[i] = 2. * cm[i, i] / denom
        # Classes absent from the test set keep an F1 of 0.
    print("F1Score :")
    for l_id, score in enumerate(F1Score):
        print("{}: {}".format(label_values[l_id], score))
    print("---")
    # Cohen's kappa: agreement beyond chance, (pa - pe) / (1 - pe).
    total = np.sum(cm)
    pa = np.trace(cm) / float(total)
    pe = np.sum(np.sum(cm, axis=0) * np.sum(cm, axis=1)) / float(total * total)
    kappa = (pa - pe) / (1 - pe)
    print("Kappa: " + str(kappa))
if __name__ == '__main__':
result_dir = 'prediction_valid_#107-0089'
pred_label_list = []
gt_label_list = []
BASE_FOLDER = './data/AerialImageDataset/valid/gt/'
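    # The prediction files are assumed to be soft maps saved as
    # '<result_dir>/pred_0_<tile>.tif.npy', one per ground-truth tile.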
# for img_fname in glob.glob('%s/pred_0_*.tif.npy' % result_dir):
# pred_label = np.load(img_fname)
# pred_label = pred_label>=0.5
# img_fname = os.path.basename(img_fname)[7:].split('.npy')[0]
# print(img_fname)
# gt_label = cv.imread(BASE_FOLDER + img_fname, cv.IMREAD_GRAYSCALE)
# gt_label /= gt_label.max()
# print(np.unique(pred_label),np.unique(gt_label))
# pred_label_list.append(pred_label)
# gt_label_list.append(gt_label)
    # Iterate over ground-truth tiles and load the matching prediction map.
    for img_fname in glob.glob('%s/*.tif' % BASE_FOLDER):
        gt_label = cv.imread(img_fname, cv.IMREAD_GRAYSCALE)
        gt_label = gt_label // gt_label.max()  # binarize the 0/255 mask to 0/1
        img_fname = os.path.basename(img_fname)
        pred_label = np.load(result_dir + '/pred_0_' + img_fname + '.npy')
        pred_label = pred_label >= 0.5  # threshold soft predictions
        print(np.unique(pred_label), np.unique(gt_label))
        pred_label_list.append(pred_label)
        gt_label_list.append(gt_label)
print "Computing metrics..."
# metrics(pred_label_list, gt_label_list)
    # Cast the boolean/integer maps to int64 so the (x - 1) arithmetic below
    # is valid, then count TP/TN/FP/FN over all pixels. The +1.0 terms are
    # add-one smoothing to avoid division by zero on degenerate inputs.
    pred = np.concatenate([p.flatten() for p in pred_label_list]).astype(np.int64)
    label = np.concatenate([g.flatten() for g in gt_label_list]).astype(np.int64)
    TP = float(np.count_nonzero(pred * label)) + 1.0
    TN = float(np.count_nonzero((pred - 1) * (label - 1)))
    FP = float(np.count_nonzero(pred * (label - 1))) + 1.0
    FN = float(np.count_nonzero((pred - 1) * label)) + 1.0
    prec = TP / (TP + FP)      # precision = TP / (TP + FP)
    rec = TP / (TP + FN)       # recall    = TP / (TP + FN)
    iou = TP / (TP + FN + FP)  # intersection over union for the building class
    f1 = 2 * prec * rec / (prec + rec)
    acc = (TP + TN) / (TP + TN + FP + FN)
# print(prediction_labels.shape,gt_labels.shape)
# acc = utils.compute_avg_accuracy(prediction_labels,gt_labels)
# rec = utils.recall(prediction_labels,gt_labels)
# prec = utils.precision(prediction_labels,gt_labels)
# iou = utils.compute_mean_iou(prediction_labels,gt_labels)
    print('acc, rec, prec, f1, iou:', acc, rec, prec, f1, iou)