These notes are meant to broaden my deep-learning knowledge and to record commonly used commands; they will be updated continuously.
```python
import torchvision.transforms as transforms

toPIL = transforms.ToPILImage()  # converts a tensor into a PIL Image
img = toPIL(img)
# call save() on the resulting PIL Image to write it to disk
img.save("/opt/data/private/stable_signature-main/output/dogtes.jpg")

totensor = transforms.ToTensor()
img = totensor(img)  # img was a PIL Image (or ndarray); ToTensor turns it back into a tensor
```
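Since `img` is not defined in the snippet above, here is a self-contained round-trip sketch; the random tensor and the output filename are made up for illustration:

```python
import torch
import torchvision.transforms as transforms

t = torch.rand(3, 256, 256)            # float tensor in [0, 1], shape (C, H, W)
pil_img = transforms.ToPILImage()(t)   # tensor -> PIL Image
pil_img.save("roundtrip.jpg")          # hypothetical output path
t2 = transforms.ToTensor()(pil_img)    # PIL Image -> tensor, shape (3, 256, 256)
print(t2.shape)
```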
Setting the random seed:

```python
import random

import numpy as np
import torch

seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
```
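A quick sanity check (not from the original notes) that seeding makes random draws reproducible:

```python
import torch

seed = 42
torch.manual_seed(seed)
a = torch.randn(3)
torch.manual_seed(seed)   # re-seed before the second draw
b = torch.randn(3)
print(torch.equal(a, b))  # True: same seed, same random numbers
```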
Code for loading image data; it can be used as-is:

```python
import os

import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.datasets.folder import is_image_file, default_loader

# VQGAN-style normalization: maps [0, 1] to roughly [-1, 1]
normalize_vqgan = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(256),
    transforms.ToTensor(),
    normalize_vqgan,
])

def get_image_paths(path):
    """Recursively collect all image file paths under a directory."""
    paths = []
    for path, _, files in os.walk(path):
        for filename in files:
            paths.append(os.path.join(path, filename))
    return sorted([fn for fn in paths if is_image_file(fn)])

class ImageFolder:
    """An image folder dataset intended for self-supervised learning."""

    def __init__(self, path, transform=None, loader=default_loader):
        self.samples = get_image_paths(path)
        self.loader = loader
        self.transform = transform

    def __getitem__(self, idx: int):
        assert 0 <= idx < len(self)
        img = self.loader(self.samples[idx])
        if self.transform:
            return self.transform(img)
        return img

    def __len__(self):
        return len(self.samples)

def collate_fn(batch):
    """Collate function for the data loader; allows images of different sizes within one batch."""
    return batch

def get_dataloader(data_dir, transform, batch_size=128, num_imgs=None, shuffle=False, num_workers=4, collate_fn=collate_fn):
    dataset = ImageFolder(data_dir, transform=transform)
    if num_imgs is not None:
        dataset = Subset(dataset, np.random.choice(len(dataset), num_imgs, replace=False))
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True, drop_last=False, collate_fn=collate_fn)

# example call; collate_fn=None falls back to the default collate, which stacks same-sized images
train_loader = get_dataloader(train_dir, transform, batch_size, num_imgs=batch_size*1200, shuffle=True, num_workers=4, collate_fn=None)
```
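A minimal sketch of iterating the loader defined above; `train_dir` and `batch_size` are hypothetical placeholders, and with `collate_fn=None` each batch comes back as a stacked tensor of shape (batch_size, 3, 256, 256):

```python
train_dir = "/path/to/images"  # hypothetical image directory
batch_size = 16                # hypothetical batch size

loader = get_dataloader(train_dir, transform, batch_size, shuffle=True, collate_fn=None)
device = "cuda" if torch.cuda.is_available() else "cpu"
for imgs in loader:
    imgs = imgs.to(device)     # shape (batch_size, 3, 256, 256), values roughly in [-1, 1]
    # ... forward pass / training step goes here ...
    break
```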
Learning-rate scheduling that can be used as-is; call it inside your training for loop.

```python
import math

def adjust_learning_rate(optimizer, step, steps, warmup_steps, blr, min_lr=1e-6):
    """Decay the learning rate with a half-cycle cosine after linear warmup."""
    if step < warmup_steps:
        lr = blr * step / warmup_steps
    else:
        lr = min_lr + (blr - min_lr) * 0.5 * (1. + math.cos(math.pi * (step - warmup_steps) / (steps - warmup_steps)))
    for param_group in optimizer.param_groups:
        if "lr_scale" in param_group:
            param_group["lr"] = lr * param_group["lr_scale"]
        else:
            param_group["lr"] = lr
    return lr
```
As an example, if `steps` is the total number of training steps (say 100), then `step` is the index of the current step.
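A minimal sketch of calling `adjust_learning_rate` from the block above inside a training loop; the model, optimizer, and schedule values below are hypothetical placeholders, not part of the original notes:

```python
import torch

model = torch.nn.Linear(4, 1)                      # hypothetical model
optimizer = torch.optim.AdamW(model.parameters())  # lr is overwritten each step below

steps, warmup_steps, blr = 100, 10, 1e-3           # 100 total steps, 10 warmup steps
for step in range(steps):
    lr = adjust_learning_rate(optimizer, step, steps, warmup_steps, blr)
    x = torch.randn(8, 4)                          # dummy batch
    loss = model(x).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```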
In PyTorch, the `unsqueeze` function inserts a new dimension of size 1 at the specified position.
```python
import torch

x = torch.randn(3, 4)  # create a tensor of shape (3, 4)
print(x.shape)         # output: torch.Size([3, 4])

# insert a new dimension at position 0
y = torch.unsqueeze(x, 0)
print(y.shape)         # output: torch.Size([1, 3, 4])
```
In PyTorch, the `squeeze` function does the opposite: it removes dimensions of size 1, either all of them or only the one at the specified position.
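A short counterpart example for `squeeze`, added here for symmetry with the `unsqueeze` snippet above:

```python
import torch

y = torch.randn(1, 3, 4)
print(y.shape)                 # torch.Size([1, 3, 4])

z = torch.squeeze(y, 0)        # remove the size-1 dimension at position 0
print(z.shape)                 # torch.Size([3, 4])

w = torch.randn(1, 3, 1, 4)
print(torch.squeeze(w).shape)  # torch.Size([3, 4]); with no dim argument, all size-1 dims are removed
```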