/
global_utils.py
131 lines (93 loc) · 3.11 KB
/
global_utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.stats
def get_backbone(name, state_dict=None):
    """Build the backbone network identified by ``name``.

    Args:
        name: backbone identifier, one of 'conv4' or 'resnet12'.
        state_dict: optional weights to load into the freshly built model.

    Returns:
        The instantiated backbone module.

    Raises:
        ValueError: if ``name`` is not a supported backbone.
    """
    # Imports are kept local so a backbone module is only loaded on demand.
    if name == 'conv4':
        from backbones import conv4
        model = conv4()
    elif name == 'resnet12':
        from backbones import resnet12
        model = resnet12()
    else:
        raise ValueError('Non-supported Backbone.')
    if state_dict is not None:
        model.load_state_dict(state_dict)
    return model
class Averager():
    """Incremental (running) mean of scalar values."""

    def __init__(self):
        # number of samples folded in so far
        self.n = 0
        # current running mean (0 before any sample is added)
        self.v = 0

    def add(self, x):
        """Fold one new value into the running mean."""
        total = self.v * self.n + x
        self.n += 1
        self.v = total / self.n

    def item(self):
        """Return the current mean."""
        return self.v
class Averager_with_interval():
    """Accumulates values and reports the mean with a t-based confidence interval."""

    def __init__(self, confidence=0.95):
        # all observed values; kept so the standard error can be computed
        self.list = []
        # confidence level for the interval half-width (default 95%)
        self.confidence = confidence
        # number of observations (mirrors len(self.list))
        self.n = 0

    def add(self, x):
        """Record one observation."""
        self.list.append(x)
        self.n += 1

    def item(self, return_str=False):
        """Return the mean, or a formatted 'mean; half-width' string.

        Args:
            return_str: if True, return '<mean*100>; <half-width*100>'
                with two decimals (percentage-style); otherwise return
                the raw mean.
        """
        mean, standard_error = np.mean(self.list), scipy.stats.sem(self.list)
        # BUGFIX: use the public `ppf` (percent-point function) instead of
        # the private `_ppf`, which is not a stable API across SciPy
        # versions. For the t distribution with default loc=0/scale=1 the
        # value is identical.
        h = standard_error * scipy.stats.t.ppf((1 + self.confidence) / 2, self.n - 1)
        if return_str:
            return '{0:.2f}; {1:.2f}'.format(mean * 100, h * 100)
        else:
            return mean
def count_acc(logits, labels):
    """Classification accuracy: fraction of argmax predictions matching labels.

    Args:
        logits: tensor of class scores; argmax is taken over the last dim.
        labels: tensor of ground-truth class indices.

    Returns:
        A scalar float tensor in [0, 1].
    """
    predictions = logits.argmax(dim=-1)
    correct = (predictions == labels).float()
    return correct.mean()
def set_reproducibility(seed=0):
    """Seed all RNGs (python, numpy, torch) and force deterministic cuDNN.

    Args:
        seed: integer seed applied to every random source.
    """
    import random
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Deterministic conv algorithms; disables cuDNN autotuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_outputs_c_h(backbone, image_len):
    """Return (channels, side length) of a backbone's output feature map.

    Args:
        backbone: backbone name, 'conv4' or 'resnet12'.
        image_len: side length of the (square) input image.

    Returns:
        Tuple (c, h): output channel count and feature-map side length.
    """
    channels = {'conv4': 64, 'resnet12': 512}
    # Both backbones downsample spatial resolution by a factor of 16.
    downsample = {'conv4': 16, 'resnet12': 16}
    return channels[backbone], image_len // downsample[backbone]
def get_semantic_size(args):
    """Look up the dimensionality of each requested semantic annotation.

    Supported pairs: 'class_attributes' with the 'cub' dataset (312 dims)
    and 'image_attributes' with the 'sun' dataset (102 dims).

    Args:
        args: namespace with ``semantic_type`` (list of str) and
            ``train_data`` (dataset name).

    Returns:
        A single int when exactly one semantic type was requested,
        otherwise a list of ints (one per requested type).

    Raises:
        ValueError: if any requested type is unsupported for the dataset.
    """
    sizes = []
    for semantic_type in args.semantic_type:
        if semantic_type == 'class_attributes' and args.train_data == 'cub':
            sizes.append(312)
        elif semantic_type == 'image_attributes' and args.train_data == 'sun':
            sizes.append(102)
    # Any unsupported (type, dataset) pair contributes nothing to `sizes`,
    # so a length mismatch signals an invalid request.
    if len(sizes) != len(args.semantic_type):
        raise ValueError('Non-supported Semantic Type to the Dataset.')
    return sizes[0] if len(sizes) == 1 else sizes
def get_inputs_and_outputs(args, batch):
    """Pull images, targets and requested semantics out of a batch mapping.

    Args:
        args: namespace with ``semantic_type`` (list of str) and
            ``use_cuda`` (bool).
        batch: mapping from field name to tensor.

    Returns:
        List ``[images, targets, *semantics]``; each entry is moved to the
        GPU (non-blocking) when ``args.use_cuda`` is set.

    Raises:
        ValueError: if a requested semantic type is unknown.
    """
    supported = ('class_attributes', 'image_attributes')
    if any(t not in supported for t in args.semantic_type):
        raise ValueError('Non-supported Semantic Type.')
    keys = ['images', 'targets'] + args.semantic_type
    if args.use_cuda:
        return [batch[key].cuda(non_blocking=True) for key in keys]
    return [batch[key] for key in keys]