DCGAN where the image size differs between the discriminator and the generator
0 votes
/ 30 September 2019

I am working with a DCGAN where my discriminator takes real images of size 128x128, while the input to the generator network is a 64x64 (lower-resolution) image rather than a latent vector sampled from a multivariate Gaussian distribution. When I try to run the training loop I get dimension errors, and I believe the dimensions in my discriminator and generator architectures may be wrong. However, I cannot figure out where the error lies. ngf = 64, ndf = 128, nz = 745. Here is the code for the generator and the discriminator:
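
The code below calls weights_init and uses nz, ngf, ndf without showing their definitions. For the shape checks further down to run, assume the usual PyTorch setup; weights_init is presumably the standard DCGAN initializer from the PyTorch tutorial (an assumption, it is not shown in the question):

import torch
import torch.nn as nn

nz, ngf, ndf = 745, 64, 128  # values stated in the question

# Standard DCGAN weight initializer (assumed; the question does not show it).
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)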

class G(nn.Module): # Generator network.

    def __init__(self): # Define the architecture of the generator.
        super(G, self).__init__() # Inherit from the nn.Module tools.
        self.main = nn.Sequential( # A meta-module holding the sequence of layers.
            # Input is Z of shape (nz) x 1 x 1, going into a transposed convolution.
            nn.ConvTranspose2d(in_channels=nz, out_channels=ngf*8, kernel_size=4, stride=1, padding=0, bias=False), # nz input channels (HR_data.shape[0]); ngf*8 output feature maps, where ngf = 64.
            nn.BatchNorm2d(ngf*8), # Normalize the features along the batch dimension.
            nn.ReLU(True), # ReLU non-linearity.
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf*8, ngf*4, 4, 2, 1, bias=False), # Another transposed convolution, doubling the spatial size.
            nn.BatchNorm2d(ngf*4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf*4, ngf*2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf*2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf*2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, 3, 4, 2, 1, bias=False), # Final transposed convolution down to 3 channels.
            nn.Tanh() # Squash the output to the range [-1, +1].
            # state size. 3 x 64 x 64
        )

    def forward(self, input): # Forward pass: takes the latent input, returns the generated images.
        output = self.main(input) # Propagate the signal through the network defined by self.main.
        return output # The generated images.

# Creating the generator
netG = G() # We create the generator object.
netG.apply(weights_init) # We initialize all the weights of its neural network.

# Defining the discriminator

class D(nn.Module): # Discriminator network.

    def __init__(self): # Define the architecture of the discriminator.
        super(D, self).__init__() # Inherit from the nn.Module tools.
        self.main = nn.Sequential( # A meta-module holding the sequence of layers.
            # input is (nc=3) x 128 x 128
            nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True), # LeakyReLU non-linearity.
            # state size. (ndf) x 64 x 64
            nn.Conv2d(ndf, ndf*2, 4, 2, 1, bias=False), # Another convolution, halving the spatial size.
            nn.BatchNorm2d(ndf*2), # Normalize the features along the batch dimension.
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 32 x 32
            nn.Conv2d(ndf*2, ndf*4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf*4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 16 x 16
            nn.Conv2d(ndf*4, ndf*8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf*8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 8 x 8
            nn.Conv2d(ndf*8, 1, 4, 1, 0, bias=False), # Final convolution down to a single channel.
            nn.Sigmoid() # Squash the output to the range (0, 1).
        )

    def forward(self, input): # Forward pass: takes an image, returns discrimination scores between 0 and 1.
        output = self.main(input) # Propagate the signal through the network defined by self.main.
        return output.view(-1) # Flatten to a 1-D tensor of scores.

# Creating the discriminator
netD = D() # We create the discriminator object.
netD.apply(weights_init) # We initialize all the weights of its neural network.
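
The dimension error can be reproduced by probing D with both image sizes. With the layers above, four stride-2 convolutions take 128x128 down to 8x8, so the final 4x4 convolution leaves a 5x5 map (25 values per image) rather than a single value, whereas a 64x64 input (the generator's output size) collapses to exactly one value. An illustrative sketch:

# Probe the discriminator with both image sizes.
real = torch.randn(2, 3, 128, 128)     # real images, as stated in the question
print(netD(real).shape)                # torch.Size([50]): 2 x (1 x 5 x 5), not 2
fake = netG(torch.randn(2, nz, 1, 1))  # fake images are 3 x 64 x 64
print(netD(fake).shape)                # torch.Size([2])

# One possible fix (an assumption, not from the original post): add one more
# stride-2 stage to D so a 128x128 input reaches 4x4 before the final layer,
#   nn.Conv2d(ndf*8, ndf*16, 4, 2, 1, bias=False),  # (ndf*8) x 8 x 8 -> (ndf*16) x 4 x 4
#   nn.BatchNorm2d(ndf*16),
#   nn.LeakyReLU(0.2, inplace=True),
#   nn.Conv2d(ndf*16, 1, 4, 1, 0, bias=False),      # -> 1 x 1 x 1
# and, symmetrically, give G one more ConvTranspose2d stage so that it emits
# 3 x 128 x 128 images. The extra layer widths here are only an assumption.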