# PyTorch - QuickStart
## Basic

```python
import os
import torch

# version
torch.__version__               # PyTorch version
torch.version.cuda              # Corresponding CUDA version
torch.backends.cudnn.version()  # Corresponding cuDNN version
torch.cuda.get_device_name(0)   # GPU type

# seed
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)

# GPU
torch.cuda.is_available()
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
torch.cuda.empty_cache()        # Clear GPU cache
device = (
    "cuda" if torch.cuda.is_available()
    else "mps" if torch.backends.mps.is_available()
    else "cpu"
)
```

## Tensor

```python
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import torchvision

tensor = torch.randn(2, 3)

# tensor info
tensor.type()   # Data type
tensor.size()   # Shape of the tensor
tensor.shape    # Shape of the tensor
tensor.dim()    # Number of dimensions

# type conversions
tensor = tensor.cuda()
tensor = tensor.cpu()
tensor = tensor.float()
tensor = tensor.long()
torch.set_default_tensor_type(torch.FloatTensor)  # Set default tensor type
                                                  # (deprecated in recent PyTorch; prefer torch.set_default_dtype)

# torch.Tensor <=> np.ndarray
ndarray = tensor.cpu().numpy()
tensor = torch.from_numpy(ndarray).float()
tensor = torch.from_numpy(ndarray.copy()).float()  # If ndarray has negative stride

# Extract the value from a tensor that contains a single element
value = tensor.item()

# torch.Tensor <=> PIL.Image
# PyTorch image tensors are C×H×W (N×C×H×W for batches) with values in [0, 1],
# so conversion requires permuting dimensions and rescaling.
image = PIL.Image.fromarray(torch.clamp(tensor * 255, min=0, max=255
                                        ).byte().permute(1, 2, 0).cpu().numpy())
image = torchvision.transforms.functional.to_pil_image(tensor)  # Equivalent way
tensor = torch.from_numpy(np.asarray(PIL.Image.open(path))
                          ).permute(2, 0, 1).float() / 255
tensor = torchvision.transforms.functional.to_tensor(PIL.Image.open(path))  # Equivalent way

# np.ndarray <=> PIL.Image
image = PIL.Image.fromarray(ndarray.astype(np.uint8))
ndarray = np.asarray(PIL.Image.open(path))

# reshape
tensor.reshape(shape)
tensor.permute(0, 2, 1)  # Swap axes
tensor.flatten()         # Flatten into a 1D vector
tensor.squeeze()         # Remove dimensions of size 1
tensor.unsqueeze(dim=0)  # Add a dimension

# shuffle
tensor = tensor[torch.randperm(tensor.size(0))]  # Shuffle the first dimension

# copy
# Operation                 | New/Shared memory | Still in computation graph |
tensor.clone()            # | New               | Yes                        |
tensor.detach()           # | Shared            | No                         |
tensor.detach().clone()   # | New               | No                         |

# concatenate
tensor = torch.cat(list_of_tensors, dim=0)    # Along an existing dimension
tensor = torch.stack(list_of_tensors, dim=0)  # Along a new dimension

# one-hot
N = tensor.size(0)
one_hot = torch.zeros(N, num_classes).long()
one_hot.scatter_(dim=1, index=torch.unsqueeze(tensor, dim=1),
                 src=torch.ones(N, num_classes).long())
# Equivalently: F.one_hot(tensor, num_classes) for a long tensor of class indices

# non-zero
torch.nonzero(tensor)               # Indices of non-zero elements
torch.nonzero(tensor == 0)          # Indices of zero elements
torch.nonzero(tensor).size(0)       # Number of non-zero elements
torch.nonzero(tensor == 0).size(0)  # Number of zero elements

# equal
torch.allclose(tensor1, tensor2)  # float tensors
torch.equal(tensor1, tensor2)     # int tensors

# expand
# Expand a tensor of shape 64*512 to shape 64*512*7*7.
torch.reshape(tensor, (64, 512, 1, 1)).expand(64, 512, 7, 7)

# normalize
F.normalize(tensor, dim=1)
```

## Dataloader

```python
from torch.utils.data import TensorDataset, Dataset, DataLoader

train_ds = TensorDataset(x_train[:50000], y_train[:50000])
valid_ds = TensorDataset(x_train[50000:], y_train[50000:])
train_dl = DataLoader(train_ds, batch_size=mini_batch, shuffle=True, num_workers=4)
valid_dl = DataLoader(valid_ds, batch_size=len(valid_ds))

for xb, yb in train_dl:
    pass  # training step goes here
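```

To see these pieces working together, here is a minimal, self-contained sketch of one training epoch driven by a `DataLoader`, combining the `device` selection from the Basic section with the batching pattern above. The synthetic data, the two-layer model, and the learning rate are illustrative assumptions, not part of the original snippet.

```python
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

# Synthetic data standing in for x_train / y_train (assumption for illustration).
x_train = torch.randn(1000, 20)
y_train = torch.randint(0, 10, (1000,))

train_ds = TensorDataset(x_train[:800], y_train[:800])
valid_ds = TensorDataset(x_train[800:], y_train[800:])
train_dl = DataLoader(train_ds, batch_size=64, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=len(valid_ds))

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical stand-in model; any nn.Module works here.
model = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 10)).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.CrossEntropyLoss()

# One training epoch: the DataLoader yields (input, target) batches.
model.train()
for xb, yb in train_dl:
    xb, yb = xb.to(device), yb.to(device)  # Move each batch to the device
    loss = loss_fn(model(xb), yb)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# One full-batch validation pass, with gradients disabled.
model.eval()
with torch.no_grad():
    for xb, yb in valid_dl:
        xb, yb = xb.to(device), yb.to(device)
        acc = (model(xb).argmax(dim=1) == yb).float().mean()
        print(f"valid accuracy: {acc.item():.3f}")
```

Note that only the model and batches are moved to the device; the `DataLoader` itself stays on CPU, which is why the `.to(device)` transfer happens inside the loop.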
## Custom Dataset

...