56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
class GeneralCalculator(BasicTaskCalculator):
    r"""
    Calculator for the detection datasets in torchvision.datasets.

    Args:
        device (torch.device): the device tensors are moved to
        optimizer_name (str): the name of the optimizer (default: 'sgd')
    """
    def __init__(self, device, optimizer_name='sgd'):
        super(GeneralCalculator, self).__init__(device, optimizer_name)
        self.criterion = torch.nn.CrossEntropyLoss()
        self.DataLoader = torch.utils.data.DataLoader
        # Detection batches are (images, targets) pairs of varying shapes, so
        # the default stacking collate cannot be used; zip(*) keeps each side
        # as a tuple of per-sample items.
        self.collect_fn = lambda x: tuple(zip(*x))
def compute_loss(self, model, data):
"""
Args:
model: the model to train
data: the training dataset
Returns: dict of train-one-step's result, which should at least contains the key 'loss'
"""
model.train()
tdata = self.to_device(data)
output = model(*tdata)
return {'loss':sum(list(output.values()))}
    @torch.no_grad()
    def test(self, model, dataset, batch_size=64, num_workers=0, pin_memory=False):
        """
        Evaluate `model` on `dataset`: average loss terms plus VOC-style mAP.

        Args:
            model: a torchvision-style detection model (returns a loss dict in
                train mode and a list of per-image prediction dicts in eval mode)
            dataset: evaluation dataset; must expose `num_classes`
            batch_size (int): batch size for the evaluation loader
            num_workers (int): DataLoader worker count
            pin_memory (bool): DataLoader pin_memory flag

        Returns:
            dict: averaged loss terms plus 'mAP@.5', 'mAP@.75', 'mAP@.5:.95'
        """
        dataloader = self.get_dataloader(dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory, shuffle=False)
        num_classes = dataset.num_classes
        # Pass 1: train mode so the model reports its loss dict; gradients are
        # disabled by @torch.no_grad() above.
        # NOTE(review): train mode still updates BatchNorm running statistics
        # even under no_grad — confirm this is acceptable during evaluation.
        model.train()
        num_samples = 0
        losses = {}
        for batch_data in dataloader:
            batch_data = self.to_device(batch_data)
            output = model(*batch_data)
            # Accumulate each loss term weighted by batch size, so the final
            # division yields a per-sample average.
            for k in output:
                losses[k] = losses.get(k, 0.0) + output[k].item()*len(batch_data[0])
            losses['all_loss'] = losses.get('all_loss', 0.0) + sum([v.item() for v in output.values()])
            num_samples += len(batch_data[0])
        for k,v in losses.items():
            losses[k]/=num_samples
        # compute AP
        # Pass 2: eval mode to obtain per-image prediction dicts
        # ('boxes', 'labels', 'scores'), converted to numpy on CPU.
        predictions = []
        targets = []
        model.eval()
        for images, labels in tqdm(dataloader, desc='Predicting'):
            images = list(img.to(self.device) for img in images)
            labels = [{k: v.numpy() for k, v in t.items()} for t in labels]
            outputs = model(images)
            for out in outputs:
                for k in out.keys():
                    out[k] = out[k].cpu().numpy()
            predictions.extend(outputs)
            targets.extend(labels)
        # count TP for each class
        # Class id 0 is skipped (presumably background — verify against dataset).
        dects = {i:[] for i in range(1, num_classes)}
        gts = {i: {} for i in range(1, num_classes)}
        for image_id, pred in enumerate(predictions):
            for det_id in range(len(pred['boxes'])):
                class_id = int(pred['labels'][det_id])
                dects[class_id].append([image_id, class_id, pred['scores'][det_id], pred['boxes'][det_id]])
        for image_id, target in enumerate(targets):
            for gt_id in range(len(target['boxes'])):
                class_id = int(target['labels'][gt_id])
                # Third element ([]) records which IoU thresholds have already
                # matched this ground-truth box (each box may match only once
                # per threshold).
                gts[class_id][image_id] = gts[class_id].get(image_id, []) + [[image_id, class_id, [], target['boxes'][gt_id]]]
        res = []
        # IoU thresholds 0.50, 0.55, ..., 0.95 (COCO-style sweep).
        ious = np.arange(0.5, 1.0, 0.05)
        tf_dicts = {class_id:{iou_th:{'tp':None, 'fp':None} for iou_th in ious} for class_id in range(1, num_classes)}
        for class_id in range(1, num_classes):
            # Detections sorted by confidence score, highest first (VOC protocol).
            c_dects = sorted(dects[class_id], key=lambda d:d[2], reverse=True)
            c_gts = gts[class_id]
            c_tf_dict = tf_dicts[class_id]
            for iou_th in c_tf_dict:
                c_tf_dict[iou_th]['tp'] = np.zeros(len(c_dects))
                c_tf_dict[iou_th]['fp'] = np.zeros(len(c_dects))
            # c_tp = np.zeros(len(c_dects))
            # c_fp = np.zeros(len(c_dects))
            # Total ground-truth positives for this class (recall denominator).
            # NOTE(review): if a class has detections but zero ground truths,
            # c_npos is 0 and the recall below divides by zero — confirm.
            c_npos = sum(list(len(v) for v in c_gts.values()))
            for det_id in range(len(c_dects)):
                image_id = c_dects[det_id][0]
                gt = c_gts[image_id] if image_id in c_gts else []
                max_iou = -0.1
                # Find the ground-truth box in this image with the largest IoU.
                # jmax is only bound when some gt exceeds -0.1; in that case the
                # branch below that reads jmax is reachable, otherwise max_iou
                # stays -0.1 and every threshold takes the fp branch.
                for j in range(len(gt)):
                    d_iou = iou(gt[j][3], c_dects[det_id][3])
                    if d_iou> max_iou:
                        max_iou = d_iou
                        jmax = j
                for iou_th in ious:
                    if max_iou>iou_th:
                        # TP only if this gt box was not yet matched at iou_th;
                        # a second match of the same box counts as FP.
                        if iou_th not in c_gts[c_dects[det_id][0]][jmax][2]:
                            c_gts[c_dects[det_id][0]][jmax][2].append(iou_th)
                            c_tf_dict[iou_th]['tp'][det_id] = 1
                            # c_tp[det_id] = 1
                        else:
                            c_tf_dict[iou_th]['fp'][det_id] = 1
                            # c_fp[det_id] = 1
                    else:
                        c_tf_dict[iou_th]['fp'][det_id] = 1
            # Precision/recall curve and AP for each IoU threshold.
            res_ious = {}
            for iou_th in ious:
                c_acc_fp_i = np.cumsum(c_tf_dict[iou_th]['fp'])
                c_acc_tp_i = np.cumsum(c_tf_dict[iou_th]['tp'])
                c_recall_i = c_acc_tp_i/c_npos
                c_precision_i = np.divide(c_acc_tp_i, (c_acc_tp_i + c_acc_fp_i))
                c_ap_i, c_mpre_i, c_mrec_i, c_ii_i = average_precision(c_recall_i, c_precision_i)
                res_ious[iou_th] = c_ap_i
            res.append(res_ious)
        # mAP@0.5
        # Mean AP over classes at each threshold; index 0 is IoU 0.50 and
        # index 5 is IoU 0.75 in the arange above.
        tmp = [np.array([c_res[iou_th] for c_res in res]).mean() for iou_th in ious]
        mAP_05 = tmp[0]
        mAP_075 = tmp[5]
        mAP_05_095 = np.array(tmp).mean()
        ret = {}
        ret.update(losses)
        ret.update({'mAP@.5':float(mAP_05), 'mAP@.75':float(mAP_075), 'mAP@.5:.95':mAP_05_095})
        return ret
def to_device(self, data):
images, targets = data
images = list(img.to(self.device) for img in images)
targets = [{k: v.to(self.device) for k, v in t.items()} for t in targets]
return images, targets
def get_dataloader(self, dataset, batch_size=64, shuffle=True, num_workers=0, pin_memory=False, drop_last=False):
if self.DataLoader == None:
raise NotImplementedError("DataLoader Not Found.")
return self.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, collate_fn=self.collect_fn)