# ------------------------------------------------------------------------
# Copyright (c) Hitachi, Ltd. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Train and eval functions used in main.py
"""
import copy
import itertools
import math
import os
import pickle
import sys
from typing import Iterable

import numpy as np
import torch

import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.hico_eval import HICOEvaluator, HICOEvaluatorBox
from datasets.panoptic_eval import PanopticEvaluator
from datasets.vcoco_eval import VCOCOEvaluator


def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, output_dir: str, max_norm: float = 0):
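    """Train the model for a single epoch.

    Computes the weighted criterion losses over each batch, backpropagates,
    optionally clips gradients to `max_norm`, and logs smoothed metrics.
    Returns the globally averaged stats for the epoch.
    """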
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
if hasattr(criterion, 'loss_labels'):
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
else:
metric_logger.add_meter('obj_class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 200
for batch in metric_logger.log_every(data_loader, print_freq, header):
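        # unpack the batch: images plus optional side inputs (depth, spmap and
        # mask_part may each be None) and per-image size/id metadata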
samples, targets, depth, spmap, mask_part, img_size, id = batch
samples = samples.to(device)
if depth is not None:
depth = depth.to(device)
if spmap is not None:
spmap = spmap.to(device)
if mask_part is not None:
mask_part = mask_part.to(device)
if img_size is not None:
img_size = torch.stack(img_size, 0)
img_size = img_size.to(device)
        targets = [{k: v.to(device) for k, v in t.items() if k not in ("id", "img_id")} for t in targets]
outputs = model(samples, depth, spmap, mask_part, img_size, targets)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
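        # total loss: weighted sum of the individual criterion terms present in weight_dict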
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
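        # abort on a NaN/Inf loss; continuing would corrupt the model weights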
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
if hasattr(criterion, 'loss_labels'):
metric_logger.update(class_error=loss_dict_reduced['class_error'])
else:
metric_logger.update(obj_class_error=loss_dict_reduced['obj_class_error'])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}


@torch.no_grad()
def evaluate_hoi(dataset_file, model, postprocessors, data_loader, subject_category_id, device):
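    """Evaluate HOI detection on HICO-DET ('hico') or V-COCO ('vcoco').

    Gathers predictions and ground truths from all processes, de-duplicates
    them by image id, and returns the evaluator stats plus [gts, preds].
    """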
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
    preds = []
    gts = []
for batch in metric_logger.log_every(data_loader, 100, header):
samples, targets, depth, spmap, mask_part, img_size, id = batch
samples = samples.to(device)
if depth is not None:
depth = depth.to(device)
if spmap is not None:
spmap = spmap.to(device)
if mask_part is not None:
mask_part = mask_part.to(device)
if img_size is not None:
img_size = torch.stack(img_size, 0)
img_size = img_size.to(device)
        targets_on_device = [{k: v.to(device) for k, v in t.items() if k not in ("id", "img_id")} for t in targets]
        outputs = model(samples, depth, spmap, mask_part, img_size, targets_on_device)
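        # rescale the raw predictions back to the original image sizes for evaluation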
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = postprocessors['hoi'](outputs, orig_target_sizes)
preds.extend(list(itertools.chain.from_iterable(utils.all_gather(results))))
        # a deep copy of the targets is used here to avoid a runtime error during all_gather
gts.extend(list(itertools.chain.from_iterable(utils.all_gather(copy.deepcopy(targets)))))
# gather the stats from all processes
metric_logger.synchronize_between_processes()
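    # all_gather may return duplicated samples (e.g. from DistributedSampler
    # padding), so keep only the first prediction/ground truth per unique image id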
img_ids = [img_gts['id'] for img_gts in gts]
_, indices = np.unique(img_ids, return_index=True)
preds = [img_preds for i, img_preds in enumerate(preds) if i in indices]
gts = [img_gts for i, img_gts in enumerate(gts) if i in indices]
    if dataset_file == 'hico':
        evaluator = HICOEvaluator(preds, gts, subject_category_id,
                                  data_loader.dataset.rare_triplets,
                                  data_loader.dataset.non_rare_triplets,
                                  data_loader.dataset.correct_mat)
    elif dataset_file == 'vcoco':
        evaluator = VCOCOEvaluator(preds, gts, subject_category_id, data_loader.dataset.correct_mat)
    else:
        raise ValueError('unknown dataset_file: {}'.format(dataset_file))
    stats = evaluator.evaluate()
return stats, [gts, preds]


def extract_feature(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable,
                    device: torch.device, epoch: int, save_path: str):
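    """Run the model over `data_loader` and save selected outputs to `save_path`.

    Each batch's predicted boxes, binary logits, and decoder attention weights
    are written to `att_{batch_index}_{local_rank}.pth`.
    """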
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Extract {}: '.format(epoch)
print_freq = 200
cnt = 0
    # exist_ok=True avoids a race when several distributed workers create the directory at once
    os.makedirs(save_path, exist_ok=True)
for batch in metric_logger.log_every(data_loader, print_freq, header):
samples, targets, depth, spmap, mask_part, img_size, id = batch
samples = samples.to(device)
if depth is not None:
depth = depth.to(device)
if spmap is not None:
spmap = spmap.to(device)
if mask_part is not None:
mask_part = mask_part.to(device)
if img_size is not None:
img_size = torch.stack(img_size, 0)
img_size = img_size.to(device)
        targets = [{k: v.to(device) for k, v in t.items() if k not in ("id", "img_id")} for t in targets]
outputs = model(samples, depth, spmap, mask_part, img_size, targets)
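        # dump boxes, binary logits, and decoder attention weights for this batch;
        # one file per batch and per process so distributed workers do not collide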
torch.save({"batch_shape": samples.decompose()[1].shape,
"binary_decoder_weight": outputs["binary_decoder_weight"].detach().cpu().numpy(),
"pred_sub_boxes": outputs["pred_sub_boxes"].detach().cpu().numpy(),
"pred_obj_boxes": outputs["pred_obj_boxes"].detach().cpu().numpy(),
"size": img_size,
"id": id,
"pred_binary_logits": outputs['pred_binary_logits'].detach().cpu().numpy(),
"pred_part_binary_logits": outputs['pred_part_binary_logits'].detach().cpu().numpy(),
                   }, os.path.join(save_path, "att_{}_{}.pth".format(cnt, os.environ.get('LOCAL_RANK', '0'))))  # default rank 0 when LOCAL_RANK is unset
cnt += 1
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}