# Python version check
# Confirm which Python interpreter this notebook is running on.
import sys

sys.stdout.write(sys.version + "\n")
3.6.8 (default, Jan 14 2019, 11:02:34) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]
# Import torch to confirm it is available in this environment.
import torch
# IPython shell magic (valid in Jupyter/Colab only, not plain Python):
# install torch and torchvision with pip3 if they are missing.
!pip3 install torch torchvision
Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (1.1.0) Requirement already satisfied: torchvision in /usr/local/lib/python3.6/dist-packages (0.3.0) Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torch) (1.16.4) Requirement already satisfied: pillow>=4.1.1 in /usr/local/lib/python3.6/dist-packages (from torchvision) (4.3.0) Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from torchvision) (1.12.0) Requirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from pillow>=4.1.1->torchvision) (0.46)
import torch

# Report the torch, CUDA, and cuDNN versions the environment provides.
for template, value in (
    ("Torch version:{}", torch.__version__),
    ("cuda version: {}", torch.version.cuda),
    ("cudnn version:{}", torch.backends.cudnn.version()),
):
    print(template.format(value))
Torch version:1.1.0 cuda version: 10.0.130 cudnn version:7501
# IPython shell magic: show the version of the CUDA compiler (nvcc) on this system.
!nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2018 NVIDIA Corporation Built on Sat_Aug_25_21:08:01_CDT_2018 Cuda compilation tools, release 10.0, V10.0.130
# IPython shell magic: grep the installed cuDNN headers for the version macros
# (CUDNN_MAJOR / CUDNN_MINOR / CUDNN_PATCHLEVEL).
!cat /usr/include/x86_64-linux-gnu/cudnn_v*.h | grep CUDNN_MAJOR -A 2
#define CUDNN_MAJOR 7 #define CUDNN_MINOR 6 #define CUDNN_PATCHLEVEL 2 -- #define CUDNN_VERSION (CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL) #include "driver_types.h"
# https://pytorch.org/docs/stable/torch.html?highlight=tensor#torch.tensor
# Create a 2x3 tensor filled with zeros (lives on the CPU by default).
cpu_tensor = torch.zeros((2, 3))
print(cpu_tensor)
tensor([[0., 0., 0.], [0., 0., 0.]])
# https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device
# Choose which device (CPU or GPU) tensors should be placed on.
# torch.device('cuda') designates the default GPU.
device = torch.device('cuda')
# https://pytorch.org/docs/stable/cuda.html?highlight=available#torch.cuda.is_available
# Only attempt the transfer when a GPU is actually available; without this
# guard (and the indentation lost in the original paste) the code is a
# syntax error in plain Python and would NameError on CPU-only machines.
if torch.cuda.is_available():
    # https://pytorch.org/docs/stable/tensors.html?highlight=#torch.Tensor.to
    # Move the CPU tensor to the chosen device (here: the GPU) via .to().
    gpu_tensor = cpu_tensor.to(device)
    print(gpu_tensor)
tensor([[0., 0., 0.], [0., 0., 0.]], device='cuda:0')
# torch.device plus .to() also moves a tensor that lives on the GPU back to the CPU.
cpu_tensor_back = gpu_tensor.to(torch.device('cpu'))
cpu_tensor_back  # bare expression: displayed as the cell's output in a notebook
tensor([[0., 0., 0.], [0., 0., 0.]])