Transposed Convolution Exercise

  • torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1)
  • 다양한 세팅에서 전치 컨볼루션 연산이 어떻게 동작하는지 확인해보는 노트북입니다.

1. Import Required Libraries

In [1]:
# Core imports for the exercise: torch for tensors, nn for the
# ConvTranspose2d module, init for weight initialization, and
# matplotlib for visualizing the output feature maps.
import torch
import torch.nn as nn
import torch.nn.init as init
import matplotlib.pyplot as plt

# Record the PyTorch version this notebook was run with (output shows 1.1.0).
print(torch.__version__)
1.1.0

2. Input Data

In [2]:
# Create the input: a (batch=1, channel=1, 3x3) tensor filled with ones,
# so every output value simply counts overlapping kernel contributions.
img = torch.ones(1, 1, 3, 3)
print(img)

# Show the single-channel image; vmin=0 keeps the color scale anchored at zero.
plt.imshow(img[0, 0].numpy(), vmin=0)
tensor([[[[1., 1., 1.],
          [1., 1., 1.],
          [1., 1., 1.]]]])
Out[2]:
<matplotlib.image.AxesImage at 0x7f44a786ada0>

3. Set All Weights to One

In [3]:
# A transposed convolution with one input and one output channel and no
# bias term, so the result depends only on the kernel weights.
transpose = nn.ConvTranspose2d(
    in_channels=1,
    out_channels=1,
    kernel_size=3,
    stride=1,
    padding=0,
    output_padding=0,
    bias=False,
)

# Fill every kernel weight with 1 so the outputs are easy to interpret.
init.constant_(transpose.weight.data, 1)
Out[3]:
tensor([[[[1., 1., 1.],
          [1., 1., 1.],
          [1., 1., 1.]]]])

Kernel Size=3, stride=1, padding=0, output_padding=0

In [4]:
# Forward pass: a 3x3 all-ones input with a 3x3 all-ones kernel at
# stride=1 gives a 5x5 output ((3-1)*1 + 3 = 5), where each entry equals
# the number of kernel positions overlapping that output pixel.
out = transpose(img)
print(out, out.size())

# detach() drops the autograd graph so the tensor can be converted to NumPy.
plt.imshow(out.detach()[0, 0].numpy(), vmin=0)
plt.show()
tensor([[[[1., 2., 3., 2., 1.],
          [2., 4., 6., 4., 2.],
          [3., 6., 9., 6., 3.],
          [2., 4., 6., 4., 2.],
          [1., 2., 3., 2., 1.]]]], grad_fn=<ThnnConvTranspose2DBackward>) torch.Size([1, 1, 5, 5])

Kernel Size=3, stride=2, padding=0, output_padding=0

In [5]:
# stride=2 inserts a gap between input pixels before convolving, so the
# output grows to 7x7: (3-1)*2 + 3 = 7.
transpose = nn.ConvTranspose2d(
    1, 1, kernel_size=3, stride=2, padding=0, output_padding=0, bias=False
)
init.constant_(transpose.weight.data, 1)
out = transpose(img)

print(out, out.size())
plt.imshow(out.detach()[0, 0].numpy(), vmin=0)
tensor([[[[1., 1., 2., 1., 2., 1., 1.],
          [1., 1., 2., 1., 2., 1., 1.],
          [2., 2., 4., 2., 4., 2., 2.],
          [1., 1., 2., 1., 2., 1., 1.],
          [2., 2., 4., 2., 4., 2., 2.],
          [1., 1., 2., 1., 2., 1., 1.],
          [1., 1., 2., 1., 2., 1., 1.]]]],
       grad_fn=<ThnnConvTranspose2DBackward>) torch.Size([1, 1, 7, 7])
Out[5]:
<matplotlib.image.AxesImage at 0x7f44a4f5e860>

Kernel Size=3, stride=2, padding=1, output_padding=0

In [6]:
# padding=1 trims one ring of pixels off the full transposed-convolution
# result, shrinking the stride=2 output from 7x7 to 5x5:
# (3-1)*2 - 2*1 + 3 = 5.
transpose = nn.ConvTranspose2d(
    1, 1, kernel_size=3, stride=2, padding=1, output_padding=0, bias=False
)
init.constant_(transpose.weight.data, 1)
out = transpose(img)

print(out, out.size())
plt.imshow(out.detach()[0, 0].numpy(), vmin=0)
tensor([[[[1., 2., 1., 2., 1.],
          [2., 4., 2., 4., 2.],
          [1., 2., 1., 2., 1.],
          [2., 4., 2., 4., 2.],
          [1., 2., 1., 2., 1.]]]], grad_fn=<ThnnConvTranspose2DBackward>) torch.Size([1, 1, 5, 5])
Out[6]:
<matplotlib.image.AxesImage at 0x7f44a4ebfc18>

Kernel Size=3, stride=2, padding=0, output_padding=1

In [7]:
# output_padding=1 appends one extra row and column (zeros here) to the
# bottom/right of the stride=2 result: (3-1)*2 + 3 + 1 = 8.
transpose = nn.ConvTranspose2d(
    1, 1, kernel_size=3, stride=2, padding=0, output_padding=1, bias=False
)
init.constant_(transpose.weight.data, 1)
out = transpose(img)

print(out, out.size())
plt.imshow(out.detach()[0, 0].numpy(), vmin=0)
tensor([[[[1., 1., 2., 1., 2., 1., 1., 0.],
          [1., 1., 2., 1., 2., 1., 1., 0.],
          [2., 2., 4., 2., 4., 2., 2., 0.],
          [1., 1., 2., 1., 2., 1., 1., 0.],
          [2., 2., 4., 2., 4., 2., 2., 0.],
          [1., 1., 2., 1., 2., 1., 1., 0.],
          [1., 1., 2., 1., 2., 1., 1., 0.],
          [0., 0., 0., 0., 0., 0., 0., 0.]]]],
       grad_fn=<ThnnConvTranspose2DBackward>) torch.Size([1, 1, 8, 8])
Out[7]:
<matplotlib.image.AxesImage at 0x7f44a4e93cc0>

Kernel Size=3, stride=2, padding=1, output_padding=1

In [8]:
# Combining padding=1 with output_padding=1 doubles the spatial size
# exactly (3 -> 6): (3-1)*2 - 2*1 + 3 + 1 = 6. This is the usual setting
# for 2x upsampling with a 3x3 kernel.
transpose = nn.ConvTranspose2d(
    1, 1, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False
)
init.constant_(transpose.weight.data, 1)

out = transpose(img)
print(out, out.size())
plt.imshow(out.detach()[0, 0].numpy(), vmin=0)
tensor([[[[1., 2., 1., 2., 1., 1.],
          [2., 4., 2., 4., 2., 2.],
          [1., 2., 1., 2., 1., 1.],
          [2., 4., 2., 4., 2., 2.],
          [1., 2., 1., 2., 1., 1.],
          [1., 2., 1., 2., 1., 1.]]]], grad_fn=<ThnnConvTranspose2DBackward>) torch.Size([1, 1, 6, 6])
Out[8]:
<matplotlib.image.AxesImage at 0x7f44a4e6c6d8>
In [9]:
# kernel_size=4 with stride=2 and padding=1 also doubles the size
# ((3-1)*2 - 2*1 + 4 = 6) but with even kernel coverage, giving a more
# uniform interior (all 4s) than the 3x3 variant.
transpose = nn.ConvTranspose2d(
    1, 1, kernel_size=4, stride=2, padding=1, output_padding=0, bias=False
)
init.constant_(transpose.weight.data, 1)

out = transpose(img)
print(out, out.size())
plt.imshow(out.detach()[0, 0].numpy(), vmin=0)
tensor([[[[1., 2., 2., 2., 2., 1.],
          [2., 4., 4., 4., 4., 2.],
          [2., 4., 4., 4., 4., 2.],
          [2., 4., 4., 4., 4., 2.],
          [2., 4., 4., 4., 4., 2.],
          [1., 2., 2., 2., 2., 1.]]]], grad_fn=<ThnnConvTranspose2DBackward>) torch.Size([1, 1, 6, 6])
Out[9]:
<matplotlib.image.AxesImage at 0x7f44a4f994e0>
In [0]: