import torch
import torch.nn as nn
import torch.nn.functional as F

# NOTE: LATENT_DIM must match the value used during training (128).
class VAE(nn.Module):
    def __init__(self, latent_dim=128):
        super(VAE, self).__init__()
        self.latent_dim = latent_dim

        # ENCODER: four stride-2 convolutions, each halving the spatial size,
        # so a 3x64x64 input is reduced to a 256x4x4 feature map (64 -> 32 -> 16 -> 8 -> 4).
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.Flatten()
        )
        # Two linear heads map the flattened features to the posterior parameters.
        self.fc_mu = nn.Linear(256 * 4 * 4, latent_dim)
        self.fc_logvar = nn.Linear(256 * 4 * 4, latent_dim)

        # DECODER: mirror of the encoder; four stride-2 transposed convolutions
        # upsample 4x4 back to 64x64. Tanh maps outputs to [-1, 1], so training
        # images are expected to be normalized to that range.
        self.decoder_input = nn.Linear(latent_dim, 256 * 4 * 4)
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 3, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )

    def decode(self, z):
        h = self.decoder_input(z)
        h = h.view(-1, 256, 4, 4)  # reshape flat features to 256x4x4 for the conv stack
        return self.decoder(h)
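    # The snippet above defines only __init__ and decode. Below is a minimal
    # sketch of the missing pieces (encode, reparameterize, forward), assuming
    # the standard VAE reparameterization trick; these method names are
    # illustrative, not taken from the original.
    def encode(self, x):
        h = self.encoder(x)
        return self.fc_mu(h), self.fc_logvar(h)

    def reparameterize(self, mu, logvar):
        # z = mu + sigma * eps with eps ~ N(0, I), keeping sampling differentiable.
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def forward(self, x):
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar


# Usage sketch: sample from the standard normal prior and decode
# (assumes the model was trained on 3x64x64 images normalized to [-1, 1]).
if __name__ == "__main__":
    model = VAE(latent_dim=128)
    model.eval()
    with torch.no_grad():
        z = torch.randn(16, model.latent_dim)
        samples = model.decode(z)  # shape (16, 3, 64, 64), values in [-1, 1]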