In [2]:
#!/usr/bin/env python
# coding: utf-8

# # GAN Basic
# 
# - Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks(https://arxiv.org/pdf/1511.06434.pdf)
# - How to use multiple GPUs (nn.DataParallel)
# - Example of passing an OrderedDict to nn.Sequential
# 
# ![대체 텍스트](http://www.codingwoman.com/wp-content/uploads/2018/09/gan-2-700x529.jpg)

# In[ ]:


#get_ipython().system('pip install torch torchvision')


# In[ ]:


#get_ipython().system('pip install pillow==4.1.1')
#get_ipython().run_line_magic('reload_ext', 'autoreload')
#get_ipython().run_line_magic('autoreload', '')


# ## 1. Import required libraries

# In[ ]:


# 단순한 GAN 모델 생성 및 OrderedDict 사용법

import os
import torch
import torch.nn as nn
import torch.utils as utils
import torch.nn.init as init
import torchvision.utils as v_utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict


# In[ ]:


# Side note: two ConvTranspose2d configurations that double the spatial size.
# The kernel_size=4, stride=2, padding=1 setting produces fewer checkerboard
# artifacts than kernel_size=3, stride=2, padding=1, output_padding=1.

test = torch.ones(1, 1, 16, 16)

for settings in (dict(kernel_size=4, stride=2, padding=1),
                 dict(kernel_size=3, stride=2, padding=1, output_padding=1)):
    conv1 = nn.ConvTranspose2d(1, 1, **settings)
    out = conv1(test)
    print(out.size())


# ## 2. Hyperparameter setting

# In[ ]:


# Hyperparameters.
# Change num_gpus to the number of GPUs you want to use.

epoch = 50              # number of passes over the training set
batch_size = 512        # samples per step (drop_last discards the remainder)
learning_rate = 0.0002  # Adam learning rate (DCGAN paper default)
num_gpus = 1            # GPUs to use with nn.DataParallel
z_size = 50             # dimensionality of the latent noise vector z
middle_size = 200       # hidden-layer width used by both networks


# ## 3. Data Setting

# In[ ]:


# Download the MNIST training set into ./ on first run.
# NOTE(review): ToTensor() yields pixels in [0, 1] while the generator's Tanh
# outputs [-1, 1]; a Normalize((0.5,), (0.5,)) transform would align the two
# ranges -- left unchanged here.

mnist_train = dset.MNIST("./", train=True, transform=transforms.ToTensor(), target_transform=None, download=True)

# Input pipeline; drop_last=True discards the final partial batch so every
# batch has exactly batch_size samples (the fixed-size label tensors below
# rely on this).

train_loader = torch.utils.data.DataLoader(dataset=mnist_train,batch_size=batch_size,shuffle=True,drop_last=True)


# ## 4. Generator

# In[ ]:


# Generator: maps random noise z of size z_size to a 1x28x28 image.
# OrderedDict gives each layer a readable name that shows up in state_dict().

class Generator(nn.Module):
    """Two-layer MLP generator producing 28x28 images in [-1, 1] via Tanh."""

    def __init__(self, z_dim=None, hidden_dim=None):
        """Build the network.

        z_dim / hidden_dim default to the notebook-level z_size / middle_size
        globals, so the original no-argument Generator() call keeps working.
        """
        super(Generator, self).__init__()
        if z_dim is None:
            z_dim = z_size
        if hidden_dim is None:
            hidden_dim = middle_size
        self.layer1 = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(z_dim, hidden_dim)),
            ('bn1', nn.BatchNorm1d(hidden_dim)),
            ('act1', nn.ReLU()),
        ]))
        self.layer2 = nn.Sequential(OrderedDict([
            ('fc2', nn.Linear(hidden_dim, 784)),
            ('tanh', nn.Tanh()),  # squash pixel values into [-1, 1]
        ]))

    def forward(self, z):
        out = self.layer1(z)
        out = self.layer2(out)
        # Fix: reshape using the actual batch dimension instead of the global
        # batch_size, so partial/odd-sized batches also work.
        out = out.view(z.size(0), 1, 28, 28)
        return out


# ## 5. Discriminator

# In[ ]:


# Discriminator: scores a 1x28x28 image with a value in [0, 1] via Sigmoid
# (1 = judged real). Layers are named with an OrderedDict.

class Discriminator(nn.Module):
    """Two-layer MLP discriminator over flattened 28x28 images."""

    def __init__(self, hidden_dim=None):
        """Build the network.

        hidden_dim defaults to the notebook-level middle_size global, so the
        original no-argument Discriminator() call keeps working.
        """
        super(Discriminator, self).__init__()
        if hidden_dim is None:
            hidden_dim = middle_size
        self.layer1 = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(784, hidden_dim)),
            ('act1', nn.LeakyReLU()),
        ]))
        self.layer2 = nn.Sequential(OrderedDict([
            ('fc2', nn.Linear(hidden_dim, 1)),
            ('bn2', nn.BatchNorm1d(1)),
            ('act2', nn.Sigmoid()),
        ]))

    def forward(self, x):
        # Fix: flatten using the actual batch dimension instead of the global
        # batch_size, so partial/odd-sized batches also work.
        out = x.view(x.size(0), -1)
        out = self.layer1(out)
        out = self.layer2(out)
        return out


# ## 6. Put instances on Multi-gpu

# In[ ]:


# Select GPU when available, otherwise fall back to CPU, and wrap both
# networks in torch.nn.DataParallel(module, device_ids=None,
# output_device=None, dim=0) so a multi-GPU machine splits each batch
# across all visible devices (device_ids defaults to all, output on GPU 0).

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

generator = nn.DataParallel(Generator()).to(device)
discriminator = nn.DataParallel(Discriminator()).to(device)


# ## 7. Check layers

# In[ ]:


# Inspect the parameter/buffer names registered in each network via
# state_dict().keys(); with DataParallel every key is prefixed "module.".

gen_params = generator.state_dict().keys()
dis_params = discriminator.state_dict().keys()

for name in gen_params:
    print(name)


# ## 8. Set Loss function & Optimizer

# In[ ]:


# Objective, optimizers, and fixed real/fake target tensors for training.
# MSELoss on the sigmoid score gives a least-squares objective rather than
# the classic binary cross-entropy GAN loss.

loss_func = nn.MSELoss()
adam_betas = (0.5, 0.999)  # beta1 = 0.5, the common GAN setting
gen_optim = torch.optim.Adam(generator.parameters(), lr=learning_rate, betas=adam_betas)
dis_optim = torch.optim.Adam(discriminator.parameters(), lr=learning_rate, betas=adam_betas)

# Targets: 1 for "real", 0 for "fake"; sized to the fixed batch_size
# (valid because the DataLoader uses drop_last=True).
ones_label = torch.ones(batch_size, 1).to(device)
zeros_label = torch.zeros(batch_size, 1).to(device)


# ## 9. Restore Model

# In[ ]:


# Restore previously trained networks if a checkpoint exists; otherwise
# start from the freshly initialized models.
try:
    generator, discriminator = torch.load('./model/vanilla_gan.pkl')
    print("\n--------model restored--------\n")
except Exception:
    # Best-effort restore: any failure (missing file, incompatible pickle,
    # CPU/GPU mismatch) just means we train from scratch. Catching Exception
    # instead of a bare `except:` no longer swallows KeyboardInterrupt.
    print("\n--------model not restored--------\n")

# Create the output directories; exist_ok makes this idempotent and replaces
# the original bare try/except around os.mkdir.
os.makedirs("./model", exist_ok=True)
os.makedirs("./result", exist_ok=True)


# ## 10. Train Model

# In[ ]:


# Training loop: one discriminator step followed by one generator step per
# batch, logging and checkpointing every 100 batches.

for i in range(epoch):
    for j,(image,label) in enumerate(train_loader):
        image = image.to(device)

        # --- Discriminator update ---
        dis_optim.zero_grad()

        # Fake data: sample a fresh latent batch z ~ N(0, 0.1^2).
        z = init.normal_(torch.Tensor(batch_size,z_size),mean=0,std=0.1).to(device)
        gen_fake = generator.forward(z)
        dis_fake = discriminator.forward(gen_fake)

        # Real data.
        dis_real = discriminator.forward(image)

        # Sum both losses (fake -> 0, real -> 1) and backpropagate.
        # NOTE(review): retain_graph=True keeps the generator's graph alive
        # past this backward; since z and gen_fake are re-sampled below, it
        # looks unnecessary -- confirm before removing.
        dis_loss = torch.sum(loss_func(dis_fake,zeros_label)) + torch.sum(loss_func(dis_real,ones_label))
        dis_loss.backward(retain_graph=True)
        dis_optim.step()

        # --- Generator update ---
        gen_optim.zero_grad()

        # Fake data: new noise batch, scored by the just-updated discriminator.
        z = init.normal_(torch.Tensor(batch_size,z_size),mean=0,std=0.1).to(device)
        gen_fake = generator.forward(z)
        dis_fake = discriminator.forward(gen_fake)

        gen_loss = torch.sum(loss_func(dis_fake,ones_label)) # fake classified as real
        gen_loss.backward()
        gen_optim.step()

        # Periodically log losses, checkpoint both models, and save a 5x5
        # grid of generated samples to ./result.
        if j % 100 == 0:
            print(gen_loss,dis_loss)
            torch.save([generator,discriminator],'./model/vanilla_gan.pkl')
            v_utils.save_image(gen_fake.cpu().data[0:25],"./result/gen_{}_{}.png".format(i,j), nrow=5)
            print("{}th epoch gen_loss: {} dis_loss: {}".format(i,gen_loss.data,dis_loss.data))


# # Do not expect much on this naive GAN 
# - As the simple fully-connected architecture suggests, this model barely learns.
# - Adding convolution layers improves results considerably; see the DCGAN code.

# In[ ]:


from glob import glob

# Render the sample grids saved during training: one figure per epoch, with
# that epoch's grids laid out side by side.
for ep in range(epoch):
  print(ep)
  snapshots = glob("./result/gen_{}_*.png".format(ep))
  n_cols = len(snapshots)
  for col, path in enumerate(snapshots):
    plt.subplot(1, n_cols, col + 1)
    plt.imshow(plt.imread(path))
  plt.show()
torch.Size([1, 1, 32, 32])
torch.Size([1, 1, 32, 32])
cpu
module.layer1.fc1.weight
module.layer1.fc1.bias
module.layer1.bn1.weight
module.layer1.bn1.bias
module.layer1.bn1.running_mean
module.layer1.bn1.running_var
module.layer1.bn1.num_batches_tracked
module.layer2.fc2.weight
module.layer2.fc2.bias

--------model restored--------

tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4189, grad_fn=<AddBackward0>)
0th epoch gen_loss: 0.3501772880554199 dis_loss: 0.41887181997299194
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4191, grad_fn=<AddBackward0>)
0th epoch gen_loss: 0.35017794370651245 dis_loss: 0.4190841317176819
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4189, grad_fn=<AddBackward0>)
1th epoch gen_loss: 0.3501805067062378 dis_loss: 0.4189063310623169
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4197, grad_fn=<AddBackward0>)
1th epoch gen_loss: 0.35017919540405273 dis_loss: 0.4196997284889221
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4222, grad_fn=<AddBackward0>)
2th epoch gen_loss: 0.35017645359039307 dis_loss: 0.4221562147140503
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4196, grad_fn=<AddBackward0>)
2th epoch gen_loss: 0.35017743706703186 dis_loss: 0.41962945461273193
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4192, grad_fn=<AddBackward0>)
3th epoch gen_loss: 0.35017678141593933 dis_loss: 0.4191930294036865
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4201, grad_fn=<AddBackward0>)
3th epoch gen_loss: 0.3501800000667572 dis_loss: 0.4200609028339386
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4182, grad_fn=<AddBackward0>)
4th epoch gen_loss: 0.3501807749271393 dis_loss: 0.4182341694831848
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4204, grad_fn=<AddBackward0>)
4th epoch gen_loss: 0.35018035769462585 dis_loss: 0.4203980565071106
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4180, grad_fn=<AddBackward0>)
5th epoch gen_loss: 0.350178062915802 dis_loss: 0.4180440902709961
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
5th epoch gen_loss: 0.35017895698547363 dis_loss: 0.4183322787284851
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4182, grad_fn=<AddBackward0>)
6th epoch gen_loss: 0.3501785695552826 dis_loss: 0.4181720018386841
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
6th epoch gen_loss: 0.3501787781715393 dis_loss: 0.4192894697189331
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
7th epoch gen_loss: 0.35018032789230347 dis_loss: 0.4182773232460022
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4179, grad_fn=<AddBackward0>)
7th epoch gen_loss: 0.35017791390419006 dis_loss: 0.41788411140441895
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4189, grad_fn=<AddBackward0>)
8th epoch gen_loss: 0.3501812815666199 dis_loss: 0.418937087059021
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4196, grad_fn=<AddBackward0>)
8th epoch gen_loss: 0.3501811623573303 dis_loss: 0.41958242654800415
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4185, grad_fn=<AddBackward0>)
9th epoch gen_loss: 0.35018008947372437 dis_loss: 0.41853028535842896
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4198, grad_fn=<AddBackward0>)
9th epoch gen_loss: 0.35017985105514526 dis_loss: 0.41984495520591736
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4180, grad_fn=<AddBackward0>)
10th epoch gen_loss: 0.3501774072647095 dis_loss: 0.4180182218551636
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4187, grad_fn=<AddBackward0>)
10th epoch gen_loss: 0.35018497705459595 dis_loss: 0.4186815023422241
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4198, grad_fn=<AddBackward0>)
11th epoch gen_loss: 0.3501802384853363 dis_loss: 0.41978201270103455
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4205, grad_fn=<AddBackward0>)
11th epoch gen_loss: 0.3501792550086975 dis_loss: 0.42048829793930054
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4197, grad_fn=<AddBackward0>)
12th epoch gen_loss: 0.35017886757850647 dis_loss: 0.4196927845478058
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4198, grad_fn=<AddBackward0>)
12th epoch gen_loss: 0.35017991065979004 dis_loss: 0.41977328062057495
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4188, grad_fn=<AddBackward0>)
13th epoch gen_loss: 0.3501816391944885 dis_loss: 0.4187948405742645
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
13th epoch gen_loss: 0.3501841127872467 dis_loss: 0.4182887673377991
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
14th epoch gen_loss: 0.3501787483692169 dis_loss: 0.4193463921546936
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
14th epoch gen_loss: 0.35017573833465576 dis_loss: 0.4183470606803894
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4189, grad_fn=<AddBackward0>)
15th epoch gen_loss: 0.3501797020435333 dis_loss: 0.41887837648391724
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
15th epoch gen_loss: 0.35018065571784973 dis_loss: 0.4183264672756195
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4199, grad_fn=<AddBackward0>)
16th epoch gen_loss: 0.3501783013343811 dis_loss: 0.41988104581832886
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4185, grad_fn=<AddBackward0>)
16th epoch gen_loss: 0.35017913579940796 dis_loss: 0.4185231924057007
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4196, grad_fn=<AddBackward0>)
17th epoch gen_loss: 0.35017967224121094 dis_loss: 0.4195583164691925
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
17th epoch gen_loss: 0.3501861095428467 dis_loss: 0.41930341720581055
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4194, grad_fn=<AddBackward0>)
18th epoch gen_loss: 0.35018211603164673 dis_loss: 0.41937005519866943
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4195, grad_fn=<AddBackward0>)
18th epoch gen_loss: 0.3501792550086975 dis_loss: 0.4194599986076355
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4191, grad_fn=<AddBackward0>)
19th epoch gen_loss: 0.3501785695552826 dis_loss: 0.4190990626811981
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4219, grad_fn=<AddBackward0>)
19th epoch gen_loss: 0.35017985105514526 dis_loss: 0.42188581824302673
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
20th epoch gen_loss: 0.3501785695552826 dis_loss: 0.41834723949432373
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4201, grad_fn=<AddBackward0>)
20th epoch gen_loss: 0.35018080472946167 dis_loss: 0.4200921654701233
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
21th epoch gen_loss: 0.3501834273338318 dis_loss: 0.41831594705581665
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4188, grad_fn=<AddBackward0>)
21th epoch gen_loss: 0.35018396377563477 dis_loss: 0.41875264048576355
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4209, grad_fn=<AddBackward0>)
22th epoch gen_loss: 0.350180447101593 dis_loss: 0.4208828806877136
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4206, grad_fn=<AddBackward0>)
22th epoch gen_loss: 0.35018104314804077 dis_loss: 0.42056596279144287
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4190, grad_fn=<AddBackward0>)
23th epoch gen_loss: 0.35017791390419006 dis_loss: 0.4189741611480713
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4192, grad_fn=<AddBackward0>)
23th epoch gen_loss: 0.35017871856689453 dis_loss: 0.4192080795764923
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4192, grad_fn=<AddBackward0>)
24th epoch gen_loss: 0.3501830995082855 dis_loss: 0.41923055052757263
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4184, grad_fn=<AddBackward0>)
24th epoch gen_loss: 0.35018229484558105 dis_loss: 0.4184320569038391
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4190, grad_fn=<AddBackward0>)
25th epoch gen_loss: 0.3501847982406616 dis_loss: 0.41898807883262634
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4192, grad_fn=<AddBackward0>)
25th epoch gen_loss: 0.35017579793930054 dis_loss: 0.4192105531692505
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4187, grad_fn=<AddBackward0>)
26th epoch gen_loss: 0.3501781225204468 dis_loss: 0.4187166094779968
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4191, grad_fn=<AddBackward0>)
26th epoch gen_loss: 0.3501823842525482 dis_loss: 0.4191412329673767
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4182, grad_fn=<AddBackward0>)
27th epoch gen_loss: 0.3501795530319214 dis_loss: 0.4181707203388214
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
27th epoch gen_loss: 0.35018038749694824 dis_loss: 0.41934406757354736
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4191, grad_fn=<AddBackward0>)
28th epoch gen_loss: 0.35017919540405273 dis_loss: 0.41907939314842224
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4185, grad_fn=<AddBackward0>)
28th epoch gen_loss: 0.35018235445022583 dis_loss: 0.41854676604270935
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4198, grad_fn=<AddBackward0>)
29th epoch gen_loss: 0.35018008947372437 dis_loss: 0.4198489785194397
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
29th epoch gen_loss: 0.3501783311367035 dis_loss: 0.4193449020385742
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4199, grad_fn=<AddBackward0>)
30th epoch gen_loss: 0.35018232464790344 dis_loss: 0.4199061989784241
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4186, grad_fn=<AddBackward0>)
30th epoch gen_loss: 0.3501814007759094 dis_loss: 0.41859304904937744
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4191, grad_fn=<AddBackward0>)
31th epoch gen_loss: 0.3501776456832886 dis_loss: 0.41913968324661255
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
31th epoch gen_loss: 0.3501799702644348 dis_loss: 0.41929852962493896
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4184, grad_fn=<AddBackward0>)
32th epoch gen_loss: 0.35018157958984375 dis_loss: 0.4184403121471405
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4202, grad_fn=<AddBackward0>)
32th epoch gen_loss: 0.35017868876457214 dis_loss: 0.4202386736869812
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4188, grad_fn=<AddBackward0>)
33th epoch gen_loss: 0.35017848014831543 dis_loss: 0.41876885294914246
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
33th epoch gen_loss: 0.3501804769039154 dis_loss: 0.4183080196380615
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4185, grad_fn=<AddBackward0>)
34th epoch gen_loss: 0.35018229484558105 dis_loss: 0.4184894561767578
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4186, grad_fn=<AddBackward0>)
34th epoch gen_loss: 0.35017916560173035 dis_loss: 0.4186018407344818
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4194, grad_fn=<AddBackward0>)
35th epoch gen_loss: 0.35017722845077515 dis_loss: 0.4193791151046753
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4183, grad_fn=<AddBackward0>)
35th epoch gen_loss: 0.35018041729927063 dis_loss: 0.4183443784713745
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4198, grad_fn=<AddBackward0>)
36th epoch gen_loss: 0.35018157958984375 dis_loss: 0.4197959005832672
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4188, grad_fn=<AddBackward0>)
36th epoch gen_loss: 0.3501816391944885 dis_loss: 0.4188382625579834
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4209, grad_fn=<AddBackward0>)
37th epoch gen_loss: 0.3501795530319214 dis_loss: 0.42093396186828613
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4192, grad_fn=<AddBackward0>)
37th epoch gen_loss: 0.3501829504966736 dis_loss: 0.4192349910736084
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4195, grad_fn=<AddBackward0>)
38th epoch gen_loss: 0.35018277168273926 dis_loss: 0.41951024532318115
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4182, grad_fn=<AddBackward0>)
38th epoch gen_loss: 0.35017916560173035 dis_loss: 0.4181717038154602
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4190, grad_fn=<AddBackward0>)
39th epoch gen_loss: 0.35017669200897217 dis_loss: 0.4190109968185425
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4189, grad_fn=<AddBackward0>)
39th epoch gen_loss: 0.35017824172973633 dis_loss: 0.41891753673553467
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4211, grad_fn=<AddBackward0>)
40th epoch gen_loss: 0.35017845034599304 dis_loss: 0.42113226652145386
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4191, grad_fn=<AddBackward0>)
40th epoch gen_loss: 0.3501811623573303 dis_loss: 0.41911792755126953
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4186, grad_fn=<AddBackward0>)
41th epoch gen_loss: 0.3501802086830139 dis_loss: 0.41859835386276245
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4185, grad_fn=<AddBackward0>)
41th epoch gen_loss: 0.3501776456832886 dis_loss: 0.4185017943382263
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4203, grad_fn=<AddBackward0>)
42th epoch gen_loss: 0.3501797318458557 dis_loss: 0.4202903211116791
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4186, grad_fn=<AddBackward0>)
42th epoch gen_loss: 0.35017964243888855 dis_loss: 0.41858968138694763
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4192, grad_fn=<AddBackward0>)
43th epoch gen_loss: 0.35017701983451843 dis_loss: 0.4191858470439911
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4184, grad_fn=<AddBackward0>)
43th epoch gen_loss: 0.3501785099506378 dis_loss: 0.41837653517723083
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4195, grad_fn=<AddBackward0>)
44th epoch gen_loss: 0.35018086433410645 dis_loss: 0.4194973111152649
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4223, grad_fn=<AddBackward0>)
44th epoch gen_loss: 0.35017868876457214 dis_loss: 0.4223387539386749
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4188, grad_fn=<AddBackward0>)
45th epoch gen_loss: 0.35017693042755127 dis_loss: 0.4187568426132202
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4186, grad_fn=<AddBackward0>)
45th epoch gen_loss: 0.35017940402030945 dis_loss: 0.4185620844364166
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4209, grad_fn=<AddBackward0>)
46th epoch gen_loss: 0.35017722845077515 dis_loss: 0.4208521842956543
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
46th epoch gen_loss: 0.350180983543396 dis_loss: 0.41932427883148193
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4195, grad_fn=<AddBackward0>)
47th epoch gen_loss: 0.35017770528793335 dis_loss: 0.41950300335884094
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4196, grad_fn=<AddBackward0>)
47th epoch gen_loss: 0.3501799702644348 dis_loss: 0.4195878207683563
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4197, grad_fn=<AddBackward0>)
48th epoch gen_loss: 0.350180447101593 dis_loss: 0.4196828603744507
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4193, grad_fn=<AddBackward0>)
48th epoch gen_loss: 0.35017913579940796 dis_loss: 0.4192923307418823
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4180, grad_fn=<AddBackward0>)
49th epoch gen_loss: 0.3501781225204468 dis_loss: 0.41804611682891846
tensor(0.3502, grad_fn=<SumBackward0>) tensor(0.4181, grad_fn=<AddBackward0>)
49th epoch gen_loss: 0.3501785695552826 dis_loss: 0.41807088255882263
0
1
2
3
4
5
6
7