utils.py
import numpy as np
import torch


def create_masks(inp, device=None):
    """
    Create the combined padding and look-ahead (causal) mask for the input sequence.

    Args:
        inp: Input sequence tensor of token ids, shape (batch_size, seq_len) (PyTorch).
        device: Optional device to move the resulting mask to.

    Returns:
        mask: Combined mask tensor of shape (batch_size, 1, seq_len, seq_len) (PyTorch),
              with 1.0 at positions that should be masked out.
    """
    seq_np = inp.cpu().numpy() if inp.is_cuda else inp.numpy()

    def get_padding_mask(seq):
        # Mark padding tokens (id 0) with 1.0.
        padding_mask = (seq == 0).astype(float)
        # Add extra dimensions so the padding mask broadcasts over the attention logits.
        return padding_mask[:, np.newaxis, np.newaxis, :]  # (batch_size, 1, 1, seq_len)

    def attention_mask(size):
        # Look-ahead mask: 1.0 above the diagonal, i.e. at future positions.
        mask = 1 - np.tril(np.ones((size, size)))
        return mask  # (seq_len, seq_len)

    att_mask = attention_mask(seq_np.shape[1])
    padding_mask = get_padding_mask(seq_np)
    # A position is masked if it is padding or lies in the future.
    mask_np = np.maximum(padding_mask, att_mask[np.newaxis, :, :])
    mask = torch.tensor(mask_np, dtype=torch.float32)
    return mask.to(device)
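

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original file).
# Assumes 0 is the padding token id and a small batch of token-id sequences.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    batch = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 2, 0, 0, 0]])  # (batch_size=2, seq_len=5)
    mask = create_masks(batch, device="cpu")
    print(mask.shape)  # torch.Size([2, 1, 5, 5])
    # mask[b, 0, i, j] == 1.0 where position j is padding or lies in the
    # future (j > i); those attention logits should be masked out.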