-
Notifications
You must be signed in to change notification settings - Fork 0
/
pytorch_PEM_test.py
145 lines (121 loc) · 5.22 KB
/
pytorch_PEM_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import os
import cPickle
import torch.nn as nn
import torch
from PEM_model import PEM
import argparse
import torch.optim as optim
# import PEM_load_data
import pytorch_PEM_load_data as PEM_load_data
import numpy as np
import pandas as pd
def abs_smooth(x):
    """Smooth-L1 (Huber-style) penalty, computed element-wise.

    Equals 0.5 * x**2 where |x| <= 1 and |x| - 0.5 elsewhere, expressed as a
    single branch-free tensor formula.

    :param x: input tensor of residuals
    :return: tensor of the same shape with the smoothed absolute values
    """
    mag = torch.abs(x)
    clipped = torch.min(mag, torch.ones_like(mag))
    return 0.5 * ((mag - 1) * clipped + mag)
def PEM_loss(anchors_iou, match_iou):
    """Balanced smooth-L1 loss between predicted and ground-truth proposal IoU.

    Proposals are grouped by ground-truth IoU into high (> 0.6),
    medium ([0.2, 0.6]) and low (< 0.2) sets.  All high proposals are kept;
    medium and low proposals are randomly subsampled so that (in expectation)
    at most ``u_ratio_m * num_h`` medium and ``u_ratio_l * num_h`` low
    proposals contribute, then a smooth-L1 regression error is averaged over
    the selected proposals.

    Device-agnostic: all masks and random draws live on ``match_iou``'s
    device, so the loss works on CPU and GPU alike (previously it required
    CUDA via ``torch.cuda.FloatTensor``).

    :param anchors_iou: predicted IoU scores, 1-D float tensor
    :param match_iou: ground-truth match IoU, same shape/device as anchors_iou
    :return: scalar loss tensor
    """
    u_ratio_m = 1  # target medium:high sampling ratio
    u_ratio_l = 2  # target low:high sampling ratio
    device = match_iou.device
    # Group membership as float masks so they double as multiplicative weights.
    u_hmask = (match_iou > 0.6).float()
    u_mmask = ((match_iou >= 0.2) & (match_iou <= 0.6)).float()
    u_lmask = (match_iou < 0.2).float()
    num_h = u_hmask.sum()
    num_m = u_mmask.sum()
    num_l = u_lmask.sum()
    # Keep fraction r_m of the medium group.  NOTE(review): num_m == 0 gives
    # an inf ratio which min() clamps to 1 (harmless, mask is empty anyway);
    # num_h == num_m == 0 yields NaN, matching the original behavior.
    r_m = u_ratio_m * num_h / num_m
    r_m = torch.min(r_m, torch.ones_like(r_m))
    u_smmask = torch.rand(u_hmask.size()[0], device=device)
    u_smmask = u_smmask * u_mmask
    u_smmask = (u_smmask > (1.0 - r_m)).float()
    # Same random subsampling for the low-IoU group.
    r_l = u_ratio_l * num_h / num_l
    r_l = torch.min(r_l, torch.ones_like(r_l))
    u_slmask = torch.rand(u_hmask.size()[0], device=device)
    u_slmask = u_slmask * u_lmask
    u_slmask = (u_slmask > (1.0 - r_l)).float()
    iou_weights = u_hmask + u_smmask + u_slmask
    # smooth_l1_loss with the default beta=1 is exactly the former abs_smooth:
    # 0.5*d**2 for |d| <= 1, |d| - 0.5 otherwise.
    iou_loss = nn.functional.smooth_l1_loss(anchors_iou, match_iou, reduction='none')
    # Average over the selected proposals only.
    return (iou_loss * iou_weights).sum() / iou_weights.sum()
def run_pem(pem, X, Y_iou):
    """Forward a feature batch through the PEM network and return the loss.

    :param pem: the PEM model (callable)
    :param X: input feature tensor
    :param Y_iou: ground-truth match IoU for each proposal
    :return: PEM_loss over the flattened network output, scaled by 10
    """
    scores = pem(X).view(-1)
    return 10 * PEM_loss(scores, Y_iou)
def parse_arguments():
    """Build the command-line interface for PEM testing and parse sys.argv.

    :return: the parsed argparse.Namespace of options
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--cuda', action='store_true', help='enables cuda')
    ap.add_argument('--lr', type=float, default=0.001,
                    help='learning rate for Critic, default=0.00005')
    ap.add_argument('--beta1', type=float, default=0.9,
                    help='beta1 for adam. default=0.9')
    ap.add_argument('--niter', type=int, default=20,
                    help='number of epochs to train for')
    ap.add_argument('--batchsize', type=int, default=16,
                    help='input batch size')
    ap.add_argument('--experiment', default=None,
                    help='Where to store samples and models')
    ap.add_argument('--hiddensize', type=int, default=128,
                    help='the hidden size of network')
    ap.add_argument('--splittype', default=None,
                    help='split type (train, validation, test)')
    return ap.parse_args()
# Test-time entry point: score every proposal of every video in the chosen
# split with a trained PEM model and dump one CSV of results per video.
if __name__ == '__main__':
    opt = parse_arguments()
    batch_size = opt.batchsize
    num_epoches = opt.niter
    # Resolve the model directory; with no --experiment the default
    # ./pytorch_models is used and the experiment tag stays empty.
    # NOTE(review): `== None` should be `is None` (kept as-is here).
    if opt.experiment == None:
        experiment_type = ''
        opt.experiment = './pytorch_models'
    else:
        experiment_type = opt.experiment
        opt.experiment = os.path.join('./pytorch_models', opt.experiment)
    # Build the model, restore the best checkpoint, and move it to the GPU.
    pem = PEM(opt.hiddensize)
    model_path = os.path.join(opt.experiment, 'PEM/pem_model_best.pth')
    pem.load_state_dict(torch.load(model_path))
    pem.cuda()
    gt_path = '../../datasets/virat/bsn_dataset/stride_100_interval_300/gt_annotations.pkl'
    split_path = '../../datasets/virat/bsn_dataset/stride_100_interval_300/split.pkl'
    train_dict, val_dict, test_dict = PEM_load_data.getDatasetDict(gt_path, split_path)
    # Per-video proposal data for the requested split.
    FullDict = PEM_load_data.getTestData(train_dict, val_dict, test_dict, opt.splittype, experiment_type)
    # Select the annotation dict matching --splittype.
    # NOTE(review): any other --splittype value leaves video_list/video_dict
    # undefined and the loop below raises NameError — confirm intended.
    if opt.splittype == 'train':
        video_list = train_dict.keys()
        video_dict = train_dict
    elif opt.splittype == 'validation':
        video_list = val_dict.keys()
        video_dict = val_dict
    elif opt.splittype == 'test':
        video_list = test_dict.keys()
        video_dict = test_dict
    # batch_video_list = PEM_load_data.getBatchList(val_dict, batch_size)
    # NOTE(review): batch_video_list is computed but never used below — the
    # loop scores one video at a time.
    batch_video_list = PEM_load_data.getBatchList(video_dict, batch_size)
    # video_list = val_dict.keys()
    # NOTE(review): indexing dict.keys() by position only works on Python 2
    # (the cPickle import above also suggests Python 2) — confirm the target
    # interpreter; under Python 3 this would need list(...) first.
    for idx in range(len(video_list)):
        video_name = video_list[idx]
        prop_dict = FullDict[video_name]
        # print prop_dict
        batch_feature, batch_iou_list, batch_ioa_list = \
            PEM_load_data.prop_dict_data({"data": prop_dict})
        # Forward all of this video's proposal features in one batch and pull
        # the predicted IoU scores back to the CPU as a numpy array.
        X_feature = torch.FloatTensor(batch_feature).cuda()
        out_score = pem(X_feature)
        anchors_iou = out_score.view(-1)
        iou_score = anchors_iou.data.cpu().numpy()
        xmin_list = prop_dict["xmin"]
        xmax_list = prop_dict["xmax"]
        xmin_score_list = prop_dict["xmin_score"]
        xmax_score_list = prop_dict["xmax_score"]
        # One row per proposal: temporal boundaries, boundary scores, and the
        # PEM-predicted IoU score.
        latentDf = pd.DataFrame()
        latentDf["xmin"] = xmin_list
        # print len(xmin_list), len(xmax_list), len(xmin_score_list), len(xmax_score_list)
        # print iou_score.shape
        latentDf["xmax"] = xmax_list
        latentDf["xmin_score"] = xmin_score_list
        latentDf["xmax_score"] = xmax_score_list
        latentDf["iou_score"] = iou_score
        # latentDf.to_csv("../../output/PEM_results/" + video_name + ".csv", index=False)
        latentDf.to_csv(os.path.join('../../output', experiment_type, 'PEM_results/{}.csv'.format(video_name)),
                        index=False)