计算机视觉和图像处理
Pytorch
- 一、Pytorch部署
- 二、Pytorch基础语法
-
- [2.1 Pytorch的基本元素操作](#2.1 Pytorch的基本元素操作)
- [2.2 Pytorch的基本运算操作](#2.2 Pytorch的基本运算操作)
- [2.3 Torch Tensor和Numpy array之间的相互转换](#2.3 Torch Tensor和Numpy array之间的相互转换)
- [2.4 Pytorch中的autograd](#2.4 Pytorch中的autograd)
- 三、Pytorch初步用法
-
- [3.1 构建一个神经网络](#3.1 构建一个神经网络)
- [3.2 构建一个分类器](#3.2 构建一个分类器)
一、Pytorch部署
- 创建虚拟环境
在Anaconda终端中创建虚拟环境pytorch
python
conda create --name pytorch
- 激活虚拟环境
python
conda activate pytorch
- 安装Pytorch及其相关库
安装torch、torchvision、torchaudio
python
pip install torch torchvision torchaudio -i https://pypi.tuna.tsinghua.edu.cn/simple
- 查看是否安装成功
python
import torch
print(torch.__version__)
二、Pytorch基础语法
2.1 Pytorch的基本元素操作
python
# 创建一个没有初始化的矩阵
x = torch.empty(5,3)
print(x)
内存中存在脏数据
python
# 创建一个有初始化矩阵
x = torch.rand(5,3)
x
从均匀分布(范围通常是 [0, 1))中生成随机数
python
x = torch.randn(5,3)
x
均值为0方差为1的矩阵
python
torch.randint(0,10,(3,3))
python
# 创建⼀个全零矩阵
torch.zeros(5,3)
python
# 直接通过数据创建张量
torch.tensor([2.5,3.5])
python
# 在已有张量上创建一个新张量
x = x.new_ones(5,3)
print(x)
y = torch.rand_like(x)
print(y)
python
# 张量尺寸
x.size()
python
# 将x中的值取出
# 如果张量中只有一个元素可以直接用.item()取出
for i in range(x.size(0)):
for j in range(x.size(1)):
print(x[i][j].item())
2.2 Pytorch的基本运算操作
python
# 加法操作
y = torch.rand(5,3)
print(x+y)
python
torch.add(x,y)
python
result = torch.empty(5,3)
torch.add(x,y,out=result)
result
python
# 不赋值给y
y.add(x)
python
y
python
# 赋值给y
y.add_(x)
python
y
python
# 支持切片操作
y[:,1]
python
# 改变张量形状
x = torch.rand(2,6)
y = x.view(12)
z = x.view(3,4)
# -1表示自动匹配个数
p = x.view(6,-1)
print(x.size(),y.size(),z.size(),p.size())
2.3 Torch Tensor和Numpy array之间的相互转换
python
a = torch.ones(5)
a
python
# 将Torch tensor转换为Numpy array
b = a.numpy()
b
python
# tensor 和 array 共享底层内存空间,一个改变另一个也会改变
a.add_(1)
a,b
python
import numpy as np  # BUG FIX: `np` was used here before numpy is imported anywhere above

a = np.ones(5)
# Convert a Numpy array to a Torch tensor
b = torch.from_numpy(a)
# They share the same underlying memory: updating the array in place
# is reflected in the tensor as well.
np.add(a, 1, out=a)
a, b
2.4 Pytorch中的autograd
python
# requires_grad=True计算梯度
x = torch.ones(2,2,requires_grad=True)
x
python
# 自动继承requires_grad=True这个属性
y = x + 2
y
python
# grad_fn记录了张量是如何通过各种操作生成的。
# None因为x是手动创建的,AddBackward0表示y是通过加法操作创建的
x.grad_fn,y.grad_fn
python
z = y * y * 3
out = z.mean()
z,out
python
a = torch.rand(2,2)
a = (a * a) / (a - 1)
print(a.requires_grad)
python
a.requires_grad_(True)
print(a.requires_grad)
python
b = (a * a).sum()
b.grad_fn
python
# 关于梯度Gradients
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
y = x * x
out = y.sum()
# 反向传播
out.backward()
print("x.grad:", x.grad)
python
x = torch.rand(2,2,requires_grad=True)
x
python
# 获得一个新的Tensor,跟x拥有相同的内容但不需要自动求导
y = x.detach()
y.requires_grad
python
# 查看x和y元素是否相同
x.eq(y)
python
x.eq(y).all()
三、Pytorch初步用法
3.1 构建一个神经网络
python
import torch
import torch.nn as nn
import torch.nn.functional as F
python
# 定义一个简单的网络类
# A simple CNN: two convolution layers followed by three fully-connected layers.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Convolutions: 1 input channel -> 6 -> 16 feature maps, 3x3 kernels
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)
        # Fully-connected head; 16*6*6 matches the flattened conv output
        # for a 32x32 input (32 -> 30 -> 15 -> 13 -> 6 after convs/pools)
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # conv1 -> ReLU -> 2x2 max pool
        x = self.conv1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, (2, 2))
        # conv2 -> ReLU -> 2x2 max pool
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        # BUG FIX: this debug print runs after conv2's pooling stage,
        # so the original "Conv1 output shape" label was misleading.
        print(f"Conv2 output shape: {x.shape}")
        # Flatten every dimension except the batch dimension
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Output layer: 10 class logits
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        """Return the number of elements per sample, ignoring the batch dim."""
        shapes = x.size()[1:]
        features = 1
        for shape in shapes:
            features *= shape
        return features

net = Net()
print(net)
python
net = Net()
input = torch.randn(32, 1, 32, 32)
out = net(input)
out
python
# 查看模型中的可训练参数
params = list(net.parameters())
params
python
params[0].size(),params[1].size(),params[2].size()
python
import torch.optim as optim
input = torch.randn(1, 1, 32, 32)
out = net(input)
target = torch.randn(10)
target = target.view(1,-1)
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.01)
loss = criterion(out,target)
# 梯度清零
optimizer.zero_grad()
# 反向传播
loss.backward()
# 更新权重
optimizer.step()
loss
python
# 第二次前向传播和反向传播
out = net(input)
loss = criterion(out,target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss
3.2 构建一个分类器
python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
python
# 数据转换器,将PIL图像转换为PyTorch张量 并进行归一化
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])
# 加载训练集
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,download=True, transform=transform)
# 加载训练数据加载器
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat','deer', 'dog', 'frog', 'horse', 'ship', 'truck')
python
def imgshow(img):
    """Display a (C, H, W) image tensor, undoing the [-1, 1] normalization."""
    # Inverse of Normalize((0.5, ...), (0.5, ...)): map [-1, 1] back to [0, 1]
    restored = img / 2 + 0.5
    pixels = restored.numpy()
    plt.figure(figsize=(7, 7))
    # Tensors are channel-first (C, H, W); matplotlib expects (H, W, C)
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
# 获取一批数据
dataiter = iter(trainloader)
images, labels = next(dataiter)
# 显示图像
# imgshow(torchvision.utils.make_grid(images))
# 打印标签
print('GroundTruth:',' '.join('%5s' % classes[labels[j]] for j in range(4)))
python
# 定义卷积神经网络
# CNN for CIFAR-10: two conv/pool stages followed by a three-layer linear head.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # 32x32 input -> 28 -> 14 -> 10 -> 5, so 16*5*5 flattened features
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Two conv -> ReLU -> pool stages
        pooled = self.pool(F.relu(self.conv1(x)))
        pooled = self.pool(F.relu(self.conv2(pooled)))
        # Flatten per sample before the fully-connected head
        flat = pooled.view(-1, 16 * 5 * 5)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        # 10 class logits
        return self.fc3(hidden)

net = Net()
python
import torch.optim as optim
# 定义损失函数
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),lr=0.001,momentum=0.9)
# 定义学习率调度器
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
# 训练模型
# Train for 2 epochs over the CIFAR-10 training loader.
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # `inputs` avoids shadowing the builtin `input`
        inputs, targets = data
        optimizer.zero_grad()
        outputs = net(inputs)
        # BUG FIX: the original computed criterion(output, labels), where
        # `labels` was the stale batch left over from the earlier preview
        # cell — the loss must use THIS batch's targets.
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # Accumulate the loss value
        running_loss += loss.item()
        # Print the running loss every 2000 mini-batches
        if (i + 1) % 2000 == 0:
            print(f'[{epoch + 1}, {i + 1}] loss: {running_loss / 2000:.3f}')
            running_loss = 0.0
    # BUG FIX: the StepLR scheduler was created but never advanced;
    # step it once per epoch so the learning-rate schedule takes effect.
    scheduler.step()
print('Finished Training')
python
# 获取一组测试数据
dataiter = iter(testloader)
images, labels = next(dataiter)
# 打印原始图⽚
# imgshow(torchvision.utils.make_grid(images))
# 打印真实的标签
print('GroundTruth: ',' '.join('%5s' % classes[labels[j]] for j in range(4)))
python
# ⾸先实例化模型的类对象
net = Net()
# 利⽤模型对图⽚进⾏预测
outputs = net(images)
# 共有10个类别, 采⽤模型计算出的概率最⼤的作为预测的类别
_, predicted = torch.max(outputs, 1)
# 打印预测标签的结果
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))
预测不准确
python
correct = 0
total = 0
# Gradients are not needed for evaluation
with torch.no_grad():
    for batch in testloader:
        images, labels = batch
        outputs = net(images)
        # Predicted class = index of the largest logit
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
python
class_correct = [0.0] * 10
class_total = [0.0] * 10
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        # BUG FIX: dropped .squeeze() — (predicted == labels) is already 1-D,
        # and squeezing a size-1 batch would collapse it to 0-d, breaking c[i].
        c = (predicted == labels)
        # BUG FIX: iterate the actual batch size instead of a hard-coded 4,
        # so this works for any DataLoader batch_size (including a short
        # final batch).
        for i in range(labels.size(0)):
            label = labels[i]
            class_correct[label] += int(c[i].item())
            class_total[label] += 1
for i in range(10):
    # Guard against division by zero for a class with no test samples
    if class_total[i] > 0:
        print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
    else:
        print('Accuracy of %5s : n/a (no samples)' % classes[i])