# archs.py

from imports import *  # expected to provide at least torch, torch.nn as nn, and torch.nn.functional as F

class Generator(nn.Module):
    def __init__(self, ngpu, nc=3, ndf=160, ngf=160, nz=100):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z (nz x 1 x 1), going into a convolution
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 3, 3, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 43 x 43  ((16-1)*3 - 2*3 + 4 = 43)
            nn.ConvTranspose2d(ngf, nc, 4, 4, 6, bias=False),
            nn.Tanh()
            # state size. (nc) x 160 x 160  ((43-1)*4 - 2*6 + 4 = 160)
        )

    def forward(self, input):
        return self.main(input)
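
# Usage sketch (illustrative, not part of the original file): the comments
# above imply nz=100 latent inputs and 160x160 RGB outputs, which we can
# sanity-check as follows.
def _demo_generator():
    netG = Generator(ngpu=1)
    z = torch.randn(2, 100, 1, 1)           # batch of 2 latent vectors, nz x 1 x 1
    fake = netG(z)
    assert fake.shape == (2, 3, 160, 160)   # (nc) x 160 x 160, matching the comment above
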
class Discriminator(nn.Module):
    def __init__(self, ngpu, nc=3, ndf=160, ngf=160, nz=100):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 160 x 160
            nn.Conv2d(nc, ndf, 4, 4, 6, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 43 x 43  (floor((160 + 2*6 - 4)/4) + 1 = 43)
            nn.Conv2d(ndf, ndf * 2, 4, 3, 3, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            # nn.Sigmoid()  # left out: the network returns a raw score (suitable for BCEWithLogitsLoss)
        )

    def forward(self, input):
        return self.main(input)
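
# Usage sketch (illustrative): feeding a 160x160 RGB batch through the
# discriminator yields one raw score per image, since Sigmoid is commented
# out above.
def _demo_discriminator():
    netD = Discriminator(ngpu=1)
    x = torch.randn(2, 3, 160, 160)
    scores = netD(x)                    # raw, unsquashed scores
    assert scores.shape == (2, 1, 1, 1)
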
def CWLoss(logits, target, b_size, is_targeted, num_classes=None, kappa=0):
    # Carlini-Wagner margin loss (https://arxiv.org/pdf/1608.04644.pdf).
    # Despite the name, `logits` is expected to hold probabilities (e.g.
    # softmax output); the log(p / (1 - p)) below maps them back onto a
    # logit scale. `b_size` is unused but kept for API compatibility.
    # Reference: https://github.com/carlini/nn_robust_attacks/blob/master/l2_attack.py
    target_one_hot = torch.eye(num_classes).type(logits.type())[target.long()]
    logits = torch.log(logits / (1 - logits))
    # score of the target class
    real = torch.sum(target_one_hot * logits, 1)
    # highest score among all other classes (target masked out with a large offset)
    other = torch.max((1 - target_one_hot) * logits - (target_one_hot * 10000), 1)[0]
    kappa = torch.zeros_like(other).fill_(kappa)
    if is_targeted:
        # targeted: push the target class above every other class by a margin of kappa
        return torch.sum(torch.max(other - real, kappa))
    # untargeted: push the true class below the runner-up (kappa is ignored here)
    return torch.sum(real - other)
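
# Usage sketch (illustrative values): CWLoss expects class probabilities, so
# here we pass softmax output from a hypothetical 10-class classifier.
def _demo_cwloss():
    probs = F.softmax(torch.randn(4, 10), dim=1)   # batch of 4, 10 classes
    target = torch.tensor([3, 1, 0, 7])
    loss = CWLoss(probs, target, b_size=4, is_targeted=True,
                  num_classes=10, kappa=0)
    return loss                                    # scalar tensor
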
def weights_init(m):
    # DCGAN-style initialization: conv weights ~ N(0, 0.02),
    # batch-norm weights ~ N(1, 0.02) with zero bias.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
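
# Usage sketch: Module.apply visits every submodule recursively, so the
# initializer above is typically applied right after construction.
def _demo_weights_init():
    netG = Generator(ngpu=1)
    netG.apply(weights_init)
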
class Flatten(nn.Module):
    # Flattens (N, C, H, W) to (N, C*H*W); equivalent to nn.Flatten()
    # in newer PyTorch versions.
    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        x = x.view(x.size(0), -1)
        return x
class normalize(nn.Module):
    # L2-normalizes each row of an (N, D) batch, yielding unit-norm vectors.
    def __init__(self):
        super(normalize, self).__init__()

    def forward(self, x):
        x = F.normalize(x, p=2, dim=1)
        return x
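
# Usage sketch (illustrative): Flatten and normalize are typically chained
# as the head of a feature extractor to produce unit-length embeddings.
def _demo_feature_head():
    head = nn.Sequential(Flatten(), normalize())
    feats = head(torch.randn(4, 8, 5, 5))          # -> (4, 200)
    assert torch.allclose(feats.norm(p=2, dim=1), torch.ones(4), atol=1e-5)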