import torch
import torch.distributed as dist


class ClassMeanRecall:
    """Top-k recall averaged over classes, aggregated across distributed workers."""

    def __init__(self, num_classes: int, device: torch.device, k: int = 5):
        self.num_classes = num_classes
        # Running per-class true-positive / false-negative counts.
        self.TP = torch.zeros(num_classes, device=device)
        self.FN = torch.zeros(num_classes, device=device)
        self.k = k

    def __call__(self, logits, labels, valid_classes=None, eps=1e-8):
        """
        :param logits: tensor of shape [B, num_classes]
        :param labels: tensor of shape [B] holding integer class indices
        :param valid_classes: optional set of class indices; when given,
            predictions are restricted to these classes
        :param eps: small constant guarding against division by zero
        """
        k, tp_tensor, fn_tensor = self.k, self.TP, self.FN
        logits = torch.sigmoid(logits)

        # Mask out classes outside the valid set so they can never appear
        # among the top-k predictions.
        if valid_classes is not None:
            _logits = torch.zeros_like(logits)
            for c in valid_classes:
                _logits[:, c] = logits[:, c]
            logits = _logits

        preds = logits.topk(k, dim=1).indices

        # A sample counts as a true positive for its ground-truth class if
        # that class appears anywhere in its top-k predictions.
        for p, gt in zip(preds, labels):
            if gt in p:
                tp_tensor[gt] += 1
            else:
                fn_tensor[gt] += 1

        # Sum the counts over all workers; clone first so the reduction does
        # not overwrite the local running totals. Requires an initialized
        # torch.distributed process group.
        TP, FN = tp_tensor.clone(), fn_tensor.clone()
        dist.all_reduce(TP)
        dist.all_reduce(FN)

        # Mean recall over the classes observed so far, plus overall top-k
        # accuracy, both as percentages.
        nch = torch.sum((TP + FN) > 0)
        recall = 100.0 * torch.sum(TP / (TP + FN + eps)) / nch
        topk = 100.0 * TP.sum() / (TP + FN).sum()

        return dict(
            recall=recall,
            accuracy=topk,
        )
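

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module): runs
# the metric in a single-process "gloo" group so that dist.all_reduce reduces
# over one rank. The env settings, class count, and batch shapes below are
# assumptions for demonstration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import os

    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=0, world_size=1)

    metric = ClassMeanRecall(num_classes=10, device=torch.device("cpu"), k=5)
    logits = torch.randn(4, 10)          # [B, num_classes]
    labels = torch.randint(0, 10, (4,))  # [B]
    stats = metric(logits, labels)
    print(f"recall={stats['recall'].item():.2f} "
          f"accuracy={stats['accuracy'].item():.2f}")

    dist.destroy_process_group()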