-
Notifications
You must be signed in to change notification settings - Fork 0
/
eval_segmentation.py
112 lines (85 loc) · 2.9 KB
/
eval_segmentation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
# Evaluation script: computes per-class IoU / mIoU of a trained DeepLab
# segmentation model on the Cityscapes validation set.
import numpy as np
import matplotlib.pyplot as plt
import glob
#import imageio
import torch
import torch.nn.functional as F
import torch.optim as optim
import os
from PIL import Image
import os.path as osp
from dataset.cityscapes import cityscapesDataSet
from torch.utils import data
from torch.autograd import Variable
from model.deeplab_multi import *
# Pin to a single GPU; device ordering follows the PCI bus so the index is stable.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
dtype = torch.cuda.FloatTensor #GPU
data_dir = 'PATH'  # root of the Cityscapes data; replace placeholder before running
data_list = 'dataset/val.txt' #List of validation images
batch_size = 1
num_steps = 500 #Number of images in data_list
input_size_target = '1024,512' #Size should match size in the train set
eval_set = 'val'
num_workers = 1
# Per-channel BGR mean subtracted by the dataset loader (Caffe-style preprocessing).
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
w, h = map(int, input_size_target.split(','))
input_size_target = (w, h)
# Deterministic order (shuffle=False) so metrics are reproducible across runs.
targetloader = data.DataLoader(cityscapesDataSet(data_dir, data_list, max_iters=num_steps * batch_size, crop_size=input_size_target,
scale=False, mean=IMG_MEAN, set='val'), batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)
targetloader_iter = enumerate(targetloader)
num_classes = 19  # Cityscapes train-id classes (void/ignore labels excluded)
# RGB palette, one entry per class id 0..18 — matches the standard
# Cityscapes train-id color map (class names below; confirm against
# dataset/cityscapes.py label ordering).
colors = [ [128,64,128],   # 0  road
[244,35,232],              # 1  sidewalk
[70,70,70],                # 2  building
[102,102,156],             # 3  wall
[190,153,153],             # 4  fence
[153,153,153],             # 5  pole
[250,170,30],              # 6  traffic light
[220,220,0],               # 7  traffic sign
[107,142,35],              # 8  vegetation
[152,251,152],             # 9  terrain
[70,130,180],              # 10 sky
[220,20,60],               # 11 person
[255,0,0],                 # 12 rider
[0,0,142],                 # 13 car
[0,0,70],                  # 14 truck
[0,60,100],                # 15 bus
[0,80,100],                # 16 train
[0,0,230],                 # 17 motorcycle
[119,11,32] ]              # 18 bicycle
#ignoring void class
def fast_hist(a, b, n):
    """Build an n x n confusion matrix from flat label arrays.

    a: ground-truth class ids; b: predicted class ids (same length).
    Pixels whose ground-truth id falls outside [0, n) (e.g. the void
    label) are dropped. Entry [i, j] counts pixels with truth i and
    prediction j.
    """
    valid = (a >= 0) & (a < n)
    # Encode each (truth, pred) pair as a single index truth*n + pred,
    # then count occurrences and fold back into a square matrix.
    pair_index = n * a[valid].astype(int) + b[valid]
    return np.bincount(pair_index, minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
    """Per-class intersection-over-union from a confusion matrix.

    hist: (n, n) confusion matrix with rows = ground truth, cols = prediction.
    Returns an array of n IoU values: diag / (row_sum + col_sum - diag).
    A class absent from both ground truth and prediction yields 0/0 = NaN,
    which callers are expected to skip via np.nanmean.
    """
    # Suppress the divide/invalid RuntimeWarnings for absent classes;
    # the resulting NaN is the intended sentinel, not an error.
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
#Load our trained model
net = torch.load('model.pth', map_location='cpu')
net = net.cuda()
net.eval()  # switch BatchNorm/Dropout to inference mode; otherwise metrics are corrupted

hist = np.zeros((num_classes, num_classes))
for iteration in range(0, num_steps):
    _, batch = targetloader_iter.__next__()
    images, labels, name = batch
    images = images.cuda()  # Variable() is deprecated; plain tensors carry autograd state
    with torch.no_grad():   # evaluation only: skip building the autograd graph, saves GPU memory
        pred, a, b = net(images)
    # Drop the batch dimension (batch_size == 1) and take the argmax over
    # the class channel to get per-pixel predicted class ids.
    pred = pred.cpu().numpy()[0, :, :, :]
    pred = np.argmax(pred, 0)
    # np.int was removed in NumPy 1.24 — use the builtin int instead.
    labels = labels.cpu().numpy()[0, :, :].astype(int)
    hist += fast_hist(labels.flatten(), pred.flatten(), num_classes)
    # Running mIoU over all images processed so far.
    mIoUs = per_class_iu(hist)
    print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2)))
    torch.cuda.empty_cache() #clear cached memory
    print(iteration)

# Final per-class IoU, overall mIoU, and pixel accuracies.
mIoUs = per_class_iu(hist)
for ind_class in range(num_classes):
    print('===> Class '+str(ind_class)+':\t'+str(round(mIoUs[ind_class] * 100, 2)))
print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2)))
print('===> Accuracy Overall: ' + str(np.diag(hist).sum() / hist.sum() * 100))
# Per-class pixel accuracy (epsilon guards classes with no ground-truth pixels).
acc_percls = np.diag(hist) / (hist.sum(1) + 1e-8)
for ind_class in range(num_classes):
    print('===> Class '+str(ind_class)+' Acc:\t'+str(round(acc_percls[ind_class] * 100, 2)))