wgan.py
import torch.nn as nn


def weights_init(m):
    # DCGAN-style init: N(0, 0.02) for conv weights,
    # N(1, 0.02) with zero bias for BatchNorm layers.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


class Discriminator(nn.Module):
    # WGAN critic: ends in a bare Conv1d, so the output is an
    # unbounded scalar score per sample (no sigmoid).
    def __init__(self):
        super().__init__()
        self.main = nn.Sequential(
            # input: (batch, 1, 1824)
            nn.Conv1d(1, 64, kernel_size=4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (batch, 64, 912)
            nn.Conv1d(64, 128, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(128),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (batch, 128, 456)
            nn.Conv1d(128, 256, kernel_size=4,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm1d(256),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (batch, 256, 228)
            nn.Conv1d(256, 512, kernel_size=4,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm1d(512),
            nn.LeakyReLU(0.2, inplace=True),
            # state size: (batch, 512, 114)
            nn.Conv1d(512, 1, kernel_size=114, stride=1, padding=0, bias=False),
            # output: (batch, 1, 1)
        )

    def forward(self, x, y=None):
        # y is accepted for interface compatibility but unused
        x = self.main(x)
        return x


class Generator(nn.Module):
    # Maps a latent vector of shape (batch, nz, 1) to a signal of
    # shape (batch, 1, 1824) in [-1, 1] (Tanh output).
    def __init__(self, nz):
        super().__init__()
        self.main = nn.Sequential(
            # input: (batch, nz, 1) -> (batch, 512, 114)
            nn.ConvTranspose1d(nz, 512, 114, 1, 0, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            # (batch, 512, 114) -> (batch, 256, 228)
            nn.ConvTranspose1d(512, 256, 4, 2, 1, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            # (batch, 256, 228) -> (batch, 128, 456)
            nn.ConvTranspose1d(256, 128, 4, 2, 1, bias=False),
            nn.BatchNorm1d(128),
            nn.ReLU(True),
            # (batch, 128, 456) -> (batch, 64, 912)
            nn.ConvTranspose1d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm1d(64),
            nn.ReLU(True),
            # (batch, 64, 912) -> (batch, 1, 1824)
            nn.ConvTranspose1d(64, 1, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.main(x)
        return x
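
A minimal usage sketch, not part of the original file: it assumes a latent size of nz = 100, and the critic-step hyperparameters (RMSprop with lr = 5e-5, weight clipping at 0.01) are the WGAN paper defaults, not values taken from this repository.

if __name__ == '__main__':
    import torch

    nz = 100
    netG, netD = Generator(nz), Discriminator()
    netG.apply(weights_init)
    netD.apply(weights_init)

    # Shape check: noise -> generated sample -> critic score.
    noise = torch.randn(8, nz, 1)   # (batch, nz, 1)
    fake = netG(noise)              # (8, 1, 1824)
    score = netD(fake)              # (8, 1, 1), unbounded critic score

    # One WGAN critic step on placeholder "real" data, with weight clipping.
    optD = torch.optim.RMSprop(netD.parameters(), lr=5e-5)
    real = torch.randn(8, 1, 1824).clamp_(-1, 1)
    lossD = netD(fake.detach()).mean() - netD(real).mean()
    optD.zero_grad()
    lossD.backward()
    optD.step()
    for p in netD.parameters():
        p.data.clamp_(-0.01, 0.01)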