Autoencoder.py

from torch import nn


class Autoencoder(nn.Module):
    """Convolutional autoencoder for 3x256x256 images with a 512-d bottleneck."""

    def __init__(self):
        super().__init__()
        # Encoder: 3x256x256 -> 128x4x4 feature map, flattened and projected to a 512-d code.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 128, kernel_size=7, stride=4, padding=2),   # 3x256x256 -> 128x64x64
            nn.ReLU(True),
            nn.Conv2d(128, 64, kernel_size=3, stride=2, padding=1),  # -> 64x32x32
            nn.ReLU(True),
            nn.Conv2d(64, 32, kernel_size=3, stride=2, padding=1),   # -> 32x16x16
            nn.ReLU(True),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),   # -> 64x8x8
            nn.ReLU(True),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),  # -> 128x4x4
            nn.ReLU(True),
            nn.Flatten(),                                             # -> 2048
            nn.Linear(2048, 512),
            nn.ReLU(True),  # see https://stackoverflow.com/questions/50187127/is-it-necessary-to-use-a-linear-bottleneck-layer-for-autoencoder
        )
        # Decoder: mirror of the encoder, mapping the 512-d code back to a 3x256x256 image.
        self.decoder = nn.Sequential(
            nn.Linear(512, 2048),
            nn.ReLU(True),
            nn.Unflatten(1, (128, 4, 4)),                                      # 2048 -> 128x4x4
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),   # -> 64x8x8
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1),    # -> 32x16x16
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, kernel_size=4, stride=2, padding=1),    # -> 32x32x32
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, kernel_size=4, stride=2, padding=1),    # -> 32x64x64
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 3, kernel_size=8, stride=4, padding=2),     # -> 3x256x256
            nn.Tanh(),  # reconstruction in [-1, 1]
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
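

# Minimal usage sketch: assumes 3x256x256 inputs scaled to [-1, 1] to match the
# Tanh output; the batch size, Adam learning rate, and MSE reconstruction loss
# below are illustrative choices, not something the model itself prescribes.
if __name__ == "__main__":
    import torch

    model = Autoencoder()

    # Forward-pass shape check: the Linear(2048, 512) bottleneck pins the
    # expected input resolution to 256x256 (128 * 4 * 4 = 2048).
    x = torch.rand(4, 3, 256, 256) * 2 - 1  # dummy batch in [-1, 1]
    out = model(x)
    print(out.shape)  # torch.Size([4, 3, 256, 256])

    # One illustrative training step with a plain MSE reconstruction loss.
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.MSELoss()
    loss = criterion(model(x), x)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()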