# DualTaskLoss.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from my_functionals import compute_grad_mag


def _gumbel_softmax_sample(logits, tau=1, eps=1e-10):
    """
    Draw a sample from the Gumbel-Softmax distribution

    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
    (MIT license)
    """
    assert logits.dim() == 3
    # Add Gumbel(0, 1) noise to the logits, then apply a temperature-scaled
    # softmax over the class dimension (dim 1).
    gumbel_noise = _sample_gumbel(logits.size(), eps=eps)
    y = logits.cuda() + gumbel_noise.cuda()
    return F.softmax(y / tau, 1)
def _one_hot_embedding(labels, num_classes):
    """Embedding labels to one-hot form.

    Args:
        labels: (LongTensor) class labels, sized [N, H, W].
        num_classes: (int) number of classes.

    Returns:
        (tensor) one-hot encoded labels, sized [N, #classes, H, W].
    """
    y = torch.eye(num_classes).cuda()
    # Index the identity matrix with the labels ([N, H, W, C]) and move the
    # class dimension to position 1.
    return y[labels].permute(0, 3, 1, 2)
def _sample_gumbel(shape, eps=1e-10):
    """
    Sample from Gumbel(0, 1)

    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
    (MIT license)
    """
    U = torch.rand(shape).cuda()
    return -torch.log(eps - torch.log(U + eps))
class DualTaskLoss(nn.Module):
    def __init__(self, cuda=False):
        super(DualTaskLoss, self).__init__()
        self._cuda = cuda

    def forward(self, input_logits, gts):
        """
        :param input_logits: NxCxHxW segmentation logits
        :param gts: NxHxW ground-truth semantic label mask
        :return: final loss
        """
        N, C, H, W = input_logits.shape
        th = 1e-8  # 1e-10
        eps = 1e-10

        # One-hot encode the ground-truth labels to NxCxHxW (6 classes hard-coded).
        gt_semantic_masks = gts.detach()
        gt_semantic_masks = gt_semantic_masks.long().cuda()
        y = torch.eye(6).cuda()
        gt_semantic_masks = y[gt_semantic_masks].permute(0, 3, 1, 2)
        # gt_semantic_masks = _one_hot_embedding(gt_semantic_masks, 6).detach()

        # Differentiable (Gumbel-Softmax) sample from the predicted class distribution.
        g = _gumbel_softmax_sample(input_logits.view(N, C, -1), tau=0.5)
        g = g.reshape((N, C, H, W))

        # Spatial gradient magnitudes of the prediction and of the ground truth.
        g = compute_grad_mag(g, cuda=self._cuda)
        g_hat = compute_grad_mag(gt_semantic_masks, cuda=self._cuda)

        g = g.view(N, -1)
        g_hat = g_hat.reshape(N, -1)

        # Element-wise L1 distance between the two gradient maps.
        loss_ewise = F.l1_loss(g, g_hat, reduction='none')

        # Average the distance over pixels where either gradient map is active.
        p_plus_g_mask = (g >= th).detach().float()
        loss_p_plus_g = torch.sum(loss_ewise * p_plus_g_mask) / (torch.sum(p_plus_g_mask) + eps)

        p_plus_g_hat_mask = (g_hat >= th).detach().float()
        loss_p_plus_g_hat = torch.sum(loss_ewise * p_plus_g_hat_mask) / (torch.sum(p_plus_g_hat_mask) + eps)

        total_loss = 0.5 * loss_p_plus_g + 0.5 * loss_p_plus_g_hat

        return total_loss
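

# A minimal usage sketch, not part of the original file: it assumes a CUDA
# device is available, 6 semantic classes (matching the hard-coded
# torch.eye(6) in forward()), and random logits/labels purely as placeholder
# inputs; compute_grad_mag is the helper imported from my_functionals above.
if __name__ == "__main__":
    N, C, H, W = 2, 6, 64, 64
    logits = torch.randn(N, C, H, W, device="cuda", requires_grad=True)
    labels = torch.randint(0, C, (N, H, W), device="cuda")

    criterion = DualTaskLoss(cuda=True)
    loss = criterion(logits, labels)
    loss.backward()
    print(loss.item())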