1. Tensor data operations and data types
1) Creating tensors
2) Data types
Specifying the data type with the dtype parameter
Converting data types with type_as
3) Putting it together
Create the file tensor_create.py
Add the following code:
python
# -*- coding: utf-8 -*-
"""
张量创建
"""
import torch
import numpy as np
# 创建Python列表
a = [1.0, 2.0, 3.0]
# 是否为PyTorch张量
print("a = [1.0, 2.0, 3.0]\na是否为PyTorch张量:", torch.is_tensor(a))
print("a[0]: ", a[0])
# 从Python列表中创建PyTorch张量
b = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
print("b = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])")
# 是否是PyTorch张量
print("b是否是PyTorch张量:", torch.is_tensor(b))
print("b中的数据:", b)
# 从Python列表中创建PyTorch张量
c = torch.tensor([[1.0, 4.0], [2.0, 1.0], [3.0, 5.0]])
print("c = torch.tensor([[1.0, 4.0], [2.0, 1.0], [3.0, 5.0]])\nc中的数据:", c)
# 创建3个元素的张量,初始化为1
d = torch.ones(3)
print("d = torch.ones(3)\nd中的数据:", d)
# 创建3*3张量,初始化为0
e = torch.zeros(3, 3)
print("e = torch.zeros(3, 3)\ne中的数据:", e)
print("e中的元素个数:", torch.numel(e))
# 创建1*2*3张量。n表示正态分布,从均值为0方差为1的标准正态分布中抽取一组随机数
f = torch.randn(1, 2, 3)
print("f = torch.randn(1, 2, 3)\n正态分布:")
print("f中的数据:", f)
print("f中的元素个数:", torch.numel(f))
# 创建1*2*3张量。从[0,1)范围内均匀分布中抽取一组随机数
g = torch.rand(1, 2, 3)
print("g = torch.rand(1, 2, 3),均匀分布:")
print("g中的数据:", g)
print("g中的元素个数:", torch.numel(g))
# 创建二维对角矩阵张量
print(f"torch.eye(3):\n{torch.eye(3)}")
print(f"torch.eye(4):\n{torch.eye(4)}")
print(f"torch.eye(3, 4):\n{torch.eye(3, 4)}")
# 在[start, end)区间内创建一维张量序列,step为步长
# 注意torch.range已经弃用,最好只用torch.arange
print(f"torch.arange(1, 4):\n{torch.arange(1, 4)}")
print(f"torch.arange(0, 3, step=0.5):\n{torch.arange(0, 3, step=0.5)}")
# 生成等分间隔的序列
print(f"torch.linspace(1, 4, steps=5):\n{torch.linspace(1, 4, steps=5)}")
print(f"torch.linspace(1, 4, 100):\n{torch.linspace(1, 4, 100)}")
# 创建0~n-1之间随机置乱的张量
print(f"torch.randperm(5):\n{torch.randperm(5)}")
# Numpy数组转换为Tensor
h = np.array([1, 2, 3, 4, 5, 6]).reshape(2, 3)
h2 = torch.from_numpy(h)
print(f"h2 = torch.from_numpy(h):\nh: {h}\nh2: {h2}")
# Tensor转换为Numpy数组
h3 = h2.numpy()
print(f"h3 = h2.numpy():\nh2: {h2}\nh3: {h3}")
# Tensor数据类型
print(f"torch.tensor([1, 2], dtype=torch.float): {torch.tensor([1, 2], dtype=torch.float)}")
print(f"torch.tensor([0, 1], dtype=torch.bool): {torch.tensor([0, 1], dtype=torch.bool)}")
# Tensor数据类型转换
i = torch.FloatTensor([1, 2, 3])
i2 = i.type_as(torch.IntTensor())
print(f"i2 = i.type_as(torch.IntTensor()):\ni: {i}\ni2: {i2}")
2. Tensor indexing, slicing, concatenation, and reshaping
1) Indexing
2) Slicing
3) Concatenation
4) Reshaping
5) Putting it together
The code is as follows:
python
"""
张量索引、切片、拼接及形状变换
"""
import torch
# 1 索引
# torch.index_select()在维度dim上按index索引数据
a = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).reshape(3, 2)
#索引第0维的第0行和第2行
a2 = torch.index_select(a, 0, torch.LongTensor([0, 2]))
print(f"a2 = torch.index_select(a, 0, torch.LongTensor([0, 2])):\na: \n{a}\na2: \n{a2}")
# 使用Numpy的索引方式
print(f"a[[0, 2], :]: \n{a[[0, 2], :]}")
#-1表示从后往前数
print(f"a[0:2, [-1]]: \n{a[0:2, [-1]]}")
# torch.masked_select()按mask中取值为True进行筛选
mask = torch.BoolTensor([[0, 1], [1, 0], [0, 1]])
print(f"torch.masked_select(a, mask): \n{torch.masked_select(a, mask)}")
# 值在2.0~4.0之间为1,否则为0
mask = a.ge(2.0) & a.le(4.0)
print(mask)
print(f"torch.masked_select(a, mask): \n{torch.masked_select(a, mask)}")
# 2 切片
# torch.chunk()将张量按维度dim进行平均切分。若不能均分,则最后一份小于其他份
b = torch.arange(10).reshape(5, 2)
#默认0按行切
print(f"torch.chunk(b, 2): \n{torch.chunk(b, 2)}")
#dim取1按列切
print(f"torch.chunk(b, 2, dim=1): \n{torch.chunk(b, 2, dim=1)}")
# torch.split()将张量按维度dim进行切分。当split_size_or_sections为int时,表示块的大小;为list时,按len(split_size_or_sections)切分
b = torch.arange(10).reshape(5, 2)
print(f"torch.split(b, 2): \n{torch.split(b, 2)}")
print(f"torch.split(b, [1, 4]): \n{torch.split(b, [1, 4])}")
# 3 拼接
# torch.cat()将张量按维度dim进行拼接,不扩展张量的维度
c = torch.arange(6).reshape(2, 3)
print(f"torch.cat([c, c], dim=0): \n{torch.cat([c, c], dim=0)}")
print(f"torch.cat([c, c, c], dim=1): \n{torch.cat([c, c, c], dim=1)}")
# torch.stack()在新维度dim上进行拼接,会扩展张量的维度
c = torch.arange(6).reshape(2, 3)
c_s0 = torch.stack([c, c], dim=0) # 在扩展的维度0上进行拼接
print(f"torch.stack([c, c], dim=0): \n{c_s0}\n形状:\n{c_s0.shape}")
c_s2 = torch.stack([c, c], dim=2) # 在扩展的维度2上进行拼接
print(f"torch.stack([c, c], dim=2): \n{c_s2}\n形状:\n{c_s2.shape}")
# 4 形状变换
# torch.reshape()变换张量形状
d = torch.randperm(8)
print(f"d:\n{d}\ntorch.reshape(d, (2, 4)):\n{torch.reshape(d, (2, 4))}")
# torch.transpose()交换张量的两个维度
d = torch.randn((2, 3))
#transpose实际上进行了矩阵的转置
print(f"d:\n{d}\ntorch.transpose(d, 0, 1):\n{torch.transpose(d, 0, 1)}")
# torch.squeeze()压缩指定dim且长度为1的维度。如果dim=None,则压缩全部长度为1的维度
d = torch.zeros((2, 1, 2, 1, 2))
d_s = torch.squeeze(d)
print(f"d.size():\n{d.size()}\nd_s.size():\n{d_s.size()}")
d_s0 = torch.squeeze(d, 0)
print(f"d.size():\n{d.size()}\nd_s0.size():\n{d_s0.size()}")
d_s1 = torch.squeeze(d, 1)
print(f"d.size():\n{d.size()}\nd_s1.size():\n{d_s1.size()}")
# torch.unsqueeze()扩展指定dim的维度,其长度是1
d = torch.arange(4)
print(f"d:\n{d}\ntorch.unsqueeze(d, 0): \n{torch.unsqueeze(d, 0)}")
print(f"d:\n{d}\ntorch.unsqueeze(d, 1): \n{torch.unsqueeze(d, 1)}")
3. Tensor storage
1) Accessing tensor storage by index
2) Modifying the storage instance also modifies the corresponding tensor
3) Three indexing attributes of a tensor
4) Modifying a sub-tensor affects the original tensor
5) Cloning so that changes to a sub-tensor do not affect the original tensor
6) Verifying that transposition does not affect the storage
7) Checking whether a tensor is contiguous
8) Converting to a contiguous tensor
9) Putting it together
The code is as follows:
python
"""
张量存储
"""
import torch
# 二维张量的存储是一维的连续内存
points = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
print(f"points: \n{points}")
print(f"points.storage(): \n{points.storage()}")
# 使用索引访问张量存储
points_storage = points.storage()
print(f"points_storage[0]: \n{points_storage[0]}")
print(f"points.storage()[0]: \n{points.storage()[0]}")
# 更改存储实例同时更改对应张量
points = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
points_storage = points.storage()
points_storage[0] = 111.0
print("更改存储实例后,张量也被更改\n", points)
# 张量索引的三个属性
points = torch.tensor([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]])
second_point = points[1]
#第二个点在存储中跳过了第一个点,偏移2,第二个点大小和形状都是2;
print(f"second_point.storage_offset():\n{second_point.storage_offset()}")
print(f"second_point.size():\n{second_point.size()}")
print(f"second_point.shape:\n{second_point.shape}")
#张量points的步长为(2,1)
print(f"points.stride():\n{points.stride()}")
print(f"second_point.stride():\n{second_point.stride()}")
# 更改子张量影响原张量
points = torch.tensor([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]])
second_point = points[1]
second_point[0] = 222.0
print(f"points:\n{points}")
# 采用克隆方法以避免更改子张量影响原张量
points = torch.tensor([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]])
second_point = points[1].clone()
second_point[0] = 222.0
print(f"points:\n{points}")
# 验证转置操作并不影响其存储
points_t = points.t()
print(f"points_t:\n{points_t}")
print("转置前后的张量存储是否一致:", id(points.storage()) == id(points_t.storage()))
print(f"points.stride():\n{points.stride()}")
print(f"points_t.stride():\n{points_t.stride()}")
# 是否连续
print(f"points.is_contiguous():\n{points.is_contiguous()}")
print(f"points_t.is_contiguous():\n{points_t.is_contiguous()}")
# 转化为连续
points_t_cont = points_t.contiguous()
print(f"points_t_cont.is_contiguous():\n{points_t_cont.is_contiguous()}")
print(f"points_t_cont.storage():\n{points_t_cont.storage()}")
print(f"points_t_cont.stride():\n{points_t_cont.stride()}")
4. Tensor persistence
1) Saving a tensor to a file
2) Saving a tensor to an HDF5 file
3) Putting it together
Create the file tensor_serialize.py
The code is as follows:
python
"""
张量序列化
"""
import torch
import h5py
my_tensor = torch.randn((2, 3))
print("要保存的张量:\n", my_tensor)
# 将张量保存为文件方法一
torch.save(my_tensor, '../saved/my_tensor.pt')
# 加载张量方法一
my_tensor = torch.load('../saved/my_tensor.pt')
print("加载方法一的张量:\n", my_tensor)
# 将张量保存为文件方法二
with open('../saved/my_tensor.pt', 'wb') as f:
torch.save(my_tensor, f)
# 加载张量方法二
with open('../saved/my_tensor.pt', 'rb') as f:
my_tensor = torch.load(f)
print("加载方法二的张量:\n", my_tensor)
# 保存到HDF5文件中
with h5py.File('../saved/my_tensor.hdf5', 'w') as f:
data_set = f.create_dataset('my_train', data=my_tensor.numpy())
# 从HDF5文件中加载
with h5py.File('../saved/my_tensor.hdf5', 'r') as f:
data_set = f['my_train']
print(f"data_set.shape:\n{data_set.shape}\ndata_set.dtype:\n{data_set.dtype}")
print(f"data_set[1:]:\n{data_set[1:]}")
print(f"torch.from_numpy(data_set[1:]):\n{torch.from_numpy(data_set[1:])}")
Output:
Tensor to save:
tensor([[-0.6169, 2.3068, -0.2230],
[ 0.7826, -0.6816, -0.3101]])
Tensor loaded with method 1:
tensor([[-0.6169, 2.3068, -0.2230],
[ 0.7826, -0.6816, -0.3101]])
Tensor loaded with method 2:
tensor([[-0.6169, 2.3068, -0.2230],
[ 0.7826, -0.6816, -0.3101]])
data_set.shape:
(2, 3)
data_set.dtype:
float32
data_set[1:]:
[[ 0.7826153 -0.6816449 -0.31011662]]
torch.from_numpy(data_set[1:]):
tensor([[ 0.7826, -0.6816, -0.3101]])
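Since torch.save pickles arbitrary Python objects, several tensors are often bundled into a dict and saved as one file; this is also how model checkpoints are usually organized. A small sketch (the file name and dict keys are only for illustration):
python
import torch

a = torch.randn(2, 3)
b = torch.arange(5)
# Save several tensors together in one file
torch.save({'weights': a, 'steps': b}, '../saved/bundle.pt')
loaded = torch.load('../saved/bundle.pt')
print(loaded['weights'].shape, loaded['steps'])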
5. Tensor broadcasting
1) Operations on tensors of the same shape
2) Operations between a scalar and a tensor
3) Operations on tensors of the same dimensionality but different shapes
4) Operations on tensors of different dimensionalities
python
"""
张量广播
"""
import torch
# 1 相同形状的张量运算
a1 = torch.arange(3)
a2 = torch.arange(4, 7)
print(f"{a1} + {a2} = \n{a1 + a2}")
# 2 标量与张量运算
a1 = torch.arange(3)
print(f"{a1} + 5 = \n{a1 + 5}")
print(f"{a1} + torch.tensor(5) = \n{a1 + torch.tensor(5)}")
# 3 相同维度但不同形状的张量运算
a1 = torch.arange(12).reshape((3, 4))
a2 = torch.ones((1, 4))
print(f"{a1} + {a2} = \n{a1 + a2}")
a1 = torch.arange(12).reshape((3, 4))
a2 = torch.ones((3, 1))
print(f"{a1} + {a2} \n = {a1 + a2}")
a1 = torch.arange(3).reshape(3, 1)
a2 = torch.arange(4, 7).reshape(1, 3)
print(f"{a1} + {a2} = \n{a1 + a2}")
# 4 不同维度的张量运算
a1 = torch.arange(6).reshape(3, 2)
a2 = torch.arange(4, 6)
print(f"{a1} + {a2} = \n{a1 + a2}")
# 扩展到Pointwise运算
a1 = torch.arange(6).reshape(3, 2)
a2 = torch.arange(1, 3)
print(f"torch.add(a1, a2) = \n{torch.add(a1, a2)}")
print(f"torch.subtract(a1, a2) = \n{torch.subtract(a1, a2)}")
print(f"torch.multiply(a1, a2) = \n{torch.multiply(a1, a2)}")
print(f"torch.divide(a1, a2) = \n{torch.divide(a1, a2)}")
6. Using tensors on the GPU
GPU acceleration for tensors
1) Checking the machine's configuration
2) Specifying the tensor's device
3) Converting between CPU and GPU tensors
4) Choosing GPU acceleration based on availability
5) Putting it together
Create the file tensor_gpu.py
Add the following code:
python
"""
张量GPU加速
"""
import torch
# GPU是否可用
print("torch.cuda.is_available(): ", torch.cuda.is_available())
# GPU数量
print("torch.cuda.device_count(): ", torch.cuda.device_count())
# 当前GPU设备
print("torch.cuda.current_device(): ", torch.cuda.current_device())
# GPU设备名
print("torch.cuda.get_device_name('cuda:0'): ", torch.cuda.get_device_name('cuda:0'))
# 新建张量
ts = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
# 新建GPU张量
ts_gpu = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device='cuda')
print("torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]):\n", ts)
print("torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device='cuda'):\n", ts_gpu)
# 转换为GPU张量
ts_gpu = ts.to(device='cuda')
print("演示to(device='cuda'):\n", ts_gpu)
# 另一种转换方式
ts_gpu = ts.to(device='cuda:0')
print("演示to(device='cuda:0'):\n", ts_gpu)
# 张量运算
ts = 2 * ts
print("ts = 2 * ts:\n", ts)
# 可以这样转换
ts_gpu = 2 * ts.to(device='cuda')
print("2 * ts.to(device='cuda'):\n", ts_gpu)
# GPU张量运算
ts_gpu = ts_gpu + 10
print("ts_gpu + 10:\n", ts_gpu)
# 转换为CPU张量
ts_cpu = ts_gpu.to(device='cpu')
print("演示to(device='cpu'):\n", ts_cpu)
# 另一种转换方法
ts_gpu = ts.cuda()
print(ts_gpu)
ts_gpu = ts.cuda(0)
print(ts_gpu)
ts_cpu = ts_gpu.cpu()
print(ts_cpu)
# 根据配置选择GPU加速
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# 新建GPU或CPU张量
ts_unknown = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device)
print("根据配置选择GPU加速:\n", ts_unknown)
7. Automatic differentiation
Automatic gradients for tensors
1) Autograd with scalars
2) Autograd with matrices
3) Putting it together
Create the file tensor_autograd.py
Add the following code:
python
"""
张量自动求梯度
"""
import torch
# 先以常见的y=wx+b为例来进行说明
x = torch.tensor([2.0])
w = torch.tensor([1.0], requires_grad=True)
b = torch.tensor([0.0], requires_grad=True)
y = w * x + b
print(f"y.grad_fn: {y.grad_fn}")
# 反向传播
y.backward(retain_graph=True)
# 打印导数
print(f"x.grad: {x.grad}")
print(f"w.grad: {w.grad}")
print(f"b.grad: {b.grad}")
# 反向传播之前记得要把梯度清零,否则grad会累加
# 注释以下四条语句看效果
if w.grad is not None:
w.grad.zero_()
if b.grad is not None:
b.grad.zero_()
# 反向传播
y.backward()
# 打印导数
print(f"x.grad: {x.grad}")
print(f"w.grad: {w.grad}")
print(f"b.grad: {b.grad}")
# 再看自动求导的矩阵形式
x = torch.arange(4).view(2, 2).float()
x.requires_grad_(True)
print(f"x: {x}")
print(f"x.grad_fn: {x.grad_fn}")
y = x + 1
print(f"y: {y}")
print(f"y.grad_fn: {y.grad_fn}")
print(f"x.is_leaf: {x.is_leaf}\ny.is_leaf: {y.is_leaf}")
z = y ** 3
out = z.mean()
print(f"z: {z}")
print(f"out: {out}")
# 反向传播
out.backward() # 等价于 out.backward(torch.tensor(1.))
print(f"x.grad: {x.grad}")
8. Dataset API
Dataset is the abstract class for custom datasets
IterableDataset is an iterable-style dataset
DataLoader combines a dataset and a sampler to load the data
1) Subclassing Dataset
python
"""
鸢尾花数据集
继承Dataset
"""
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
class IrisDataset(Dataset):
""" 鸢尾花数据集 """
def __init__(self):
super(IrisDataset).__init__()
data = np.loadtxt("../datasets/fisheriris.csv", delimiter=',', dtype=np.float32)
self.x = torch.from_numpy(data[:, 0:-1])
self.y = torch.from_numpy(data[:, [-1]])
self.len = data.shape[0]
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.len
def main():
# 实例化
iris = IrisDataset()
print("第一个样本")
print(iris[0])
irir_loader = DataLoader(dataset=iris, batch_size=10, shuffle=True)
for epoch in range(2):
for i, data in enumerate(irir_loader):
# 从irir_loader中读取数据
inputs, labels = data
# 打印数据集
print(f"轮次:{epoch}\t输入数据形状:{inputs.data.size()}\t标签形状:{labels.data.size()}")
if __name__ == "__main__":
main()
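A Dataset subclass like this also works with torch.utils.data.random_split, which is the usual way to carve out a validation set. A sketch reusing the IrisDataset class defined above, assuming the standard 150-sample iris file so the split sizes add up:
python
from torch.utils.data import DataLoader, random_split

iris = IrisDataset()
# 120 training and 30 validation samples; the sizes must sum to len(iris)
train_set, val_set = random_split(iris, [120, 30])
train_loader = DataLoader(train_set, batch_size=10, shuffle=True)
val_loader = DataLoader(val_set, batch_size=10, shuffle=False)
print(len(train_set), len(val_set))  # 120 30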
2) Subclassing IterableDataset
python
"""
鸢尾花数据集
继承IterableDataset
"""
import torch
from torch.utils.data import IterableDataset, DataLoader
import numpy as np
class IrisIterDataset(IterableDataset):
""" 鸢尾花数据集 """
def __init__(self):
super(IrisIterDataset).__init__()
data = np.loadtxt("../datasets/fisheriris.csv", delimiter=',', dtype=np.float32)
self.x = torch.from_numpy(data[:, 0:-1])
self.y = torch.from_numpy(data[:, [-1]])
self.len = data.shape[0]
self.idx = 0
def __iter__(self):
self.idx = 0
return self
def __next__(self):
if self.idx < self.len:
# 此处应该从数据库或远程实时得到数据,这里只是示例,因此简单处理一下
idx = self.idx
self.idx += 1
return self.x[idx], self.y[idx]
else:
raise StopIteration
def main():
# 实例化
iris = IrisIterDataset()
print("第一个样本")
print(iris.__next__())
# 继承IterDataset的DataLoader不允许shuffle=True
irir_loader = DataLoader(dataset=iris, batch_size=10)
for epoch in range(2):
for i, data in enumerate(irir_loader):
# 从irir_loader中读取数据,一批10个样本
inputs, labels = data
# 打印数据集
print(f"轮次:{epoch}\t输入数据形状:{inputs.data.size()}\t标签形状:{labels.data.size()}")
if __name__ == "__main__":
main()
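One caveat with iterable-style datasets: when the DataLoader uses num_workers > 0, every worker process runs the same __iter__, so each sample would be produced once per worker. The standard remedy from the PyTorch documentation is to shard the range with torch.utils.data.get_worker_info(); a sketch of a replacement __iter__ for the class above:
python
from torch.utils.data import get_worker_info

def __iter__(self):
    # Shard the index range across DataLoader workers so that each
    # sample is produced exactly once even when num_workers > 0
    info = get_worker_info()
    if info is None:  # single-process data loading
        start, end = 0, self.len
    else:             # divide the range among the workers
        per_worker = (self.len + info.num_workers - 1) // info.num_workers
        start = info.id * per_worker
        end = min(start + per_worker, self.len)
    return iter((self.x[i], self.y[i]) for i in range(start, end))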
9. The torchvision toolkit
The torchvision toolkit helps with processing images and videos and is used mainly in computer vision
1) Writing a simple image dataset
Displaying the dogs-vs-cats dataset
Create dogs_cats_dataset.py; the code is as follows:
python
"""
猫狗数据集
"""
import torch
from torch.utils.data import Dataset
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
class DogsCatsDataset(Dataset):
""" 猫狗数据集定义 """
def __init__(self, root):
# root为图片的路径
imgs = os.listdir(root)
# 仅保存图片完整路径
self.imgs = [os.path.join(root, img) for img in imgs]
def __getitem__(self, idx):
img_path = self.imgs[idx]
# 根据文件名确定标签。dog标签1,cat标签0
label = 1 if 'dog' in img_path.split('/')[-1] else 0
pil_img = Image.open(img_path)
img_array = np.array(pil_img) # 形状:(H, W, C), 取值范围:[0, 255], 通道:RGB
img = torch.from_numpy(img_array)
return img, label
def __len__(self):
return len(self.imgs)
def main():
""" 测试 """
dataset = DogsCatsDataset("../datasets/kaggledogvscat/original_train")
img, label = dataset[1]
print("第一个样本")
print("图像形状:", img.shape)
plt.imshow(img)
plt.show()
# 打印前10张图片的形状和标签
i = 0
for img, label in dataset:
print(img.shape, label)
i += 1
if i >= 10:
break
if __name__ == "__main__":
main()
2) The transforms module
The module provides image transformations for PIL Image objects. It offers out-of-the-box implementations of common preprocessing steps such as padding, cropping, grayscale conversion, linear transformations, and converting images to tensors, as well as data augmentation.
Create dogs_cats_transforms.py; the code is as follows:
python
"""
猫狗数据集
"""
import torch
from torch.utils.data import Dataset
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
my_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()
])
class DogsCatsDataset(Dataset):
""" 猫狗数据集定义 """
def __init__(self, root):
# root为图片的路径
imgs = os.listdir(root)
# 仅保存图片完整路径
self.imgs = [os.path.join(root, img) for img in imgs]
self.transforms = my_transform
def __getitem__(self, idx):
img_path = self.imgs[idx]
# 根据文件名确定标签。dog标签1,cat标签0
label = 1 if 'dog' in img_path.split('/')[-1] else 0
pil_img = Image.open(img_path)
if self.transforms:
img = self.transforms(pil_img)
return img, label
def __len__(self):
return len(self.imgs)
def main():
""" 测试 """
dataset = DogsCatsDataset("../datasets/kaggledogvscat/original_train")
img, label = dataset[1]
print("第一个样本")
print("图像形状:", img.shape) # 形状为(C, H, W)
# 需要转换为(H, W, C)才能显示。以下三种转换方法可任选一种
# plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
# plt.imshow(img.permute(1, 2, 0))
plt.imshow(transforms.ToPILImage()(img))
plt.show()
# 打印前10张图片的形状和标签
i = 0
for img, label in dataset:
print(img.shape, label)
i += 1
if i >= 10:
break
if __name__ == "__main__":
main()
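The data augmentation mentioned at the start of this subsection plugs into the same Compose pipeline. A sketch of a typical augmented training transform; all of these are standard torchvision transforms, and the parameter values are only illustrative:
python
from torchvision import transforms

augment_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomCrop(224),              # random rather than center crop
    transforms.RandomHorizontalFlip(p=0.5),  # flip half of the images
    transforms.ColorJitter(brightness=0.2, contrast=0.2),
    transforms.ToTensor()
])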
3) Normalization with Normalize
In transforms, Normalize is commonly used to standardize tensor images, which requires the mean and standard deviation of the dataset.
python
"""
计算指定图像数据集的均值和标准差
"""
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# 数据变换
transform_train = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) # RGB归一化的均值和标准差
])
def get_mu_sigma(train_dataset, dim=3):
""" 计算指定图像数据集的均值和标准差 """
print("计算指定图像数据集的均值和标准差")
data_length = len(train_dataset)
print("数据集大小:", data_length)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)
mean = torch.zeros(dim)
std = torch.zeros(dim)
for img, _ in train_loader:
# 计算并累加每张图片的均值和标准差
for d in range(dim):
mean[d] += img[:, d, :, :].mean()
std[d] += img[:, d, :, :].std()
# 求平均
mean.div_(data_length)
std.div_(data_length)
return mean, std
def main():
# 训练数据集
train_dataset = torchvision.datasets.CIFAR10(root='../datasets/CIFAR10', train=True, download=False,
transform=transform_train)
print("计算出来的均值和标准差:", get_mu_sigma(train_dataset))
if __name__ == '__main__':
main()
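Once the statistics are known, they go into transforms.Normalize, which applies (x - mean) / std per channel after ToTensor has scaled the pixels to [0, 1]. A sketch using the CIFAR-10 values from the commented-out line above:
python
from torchvision import transforms

transform_train = transforms.Compose([
    transforms.ToTensor(),
    # (x - mean) / std, channel by channel
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])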
4) Reading the dogs-vs-cats dataset with ImageFolder
ImageFolder is a generic data loader
python
"""
使用ImageFolder读取猫狗数据集
"""
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import os
def main():
batch_size = 16 # 批大小
# 猫狗数据集图片目录
imgs_dir = '../datasets/kaggledogvscat/small'
# 图像转换
data_transform = transforms.Compose([transforms.Resize([64, 64]),
transforms.ToTensor()])
# 数据集
dog_vs_cat_datasets = {ds_type: datasets.ImageFolder(root=os.path.join(imgs_dir, ds_type),
transform=data_transform)
for ds_type in ["train", "validation", "test"]}
# 数据加载器
data_loader = {ds_type: DataLoader(dataset=dog_vs_cat_datasets[ds_type],
batch_size=batch_size,
shuffle=(ds_type == "train"))
for ds_type in ["train", "validation", "test"]}
x_train, y_train = next(iter(data_loader["train"]))
print(f'训练集样本个数:{len(x_train)}')
print(f'训练集标签个数:{len(y_train)}')
print('训练集样本形状:', x_train.shape)
print('训练集标签形状:', y_train.shape)
index_classes = dog_vs_cat_datasets["train"].class_to_idx
print("数据集类别索引:")
# 输出为:{'cats': 0, 'dogs': 1}
print(index_classes)
class_names = dog_vs_cat_datasets["train"].classes
print("数据集类别:")
# 输出为:['cats', 'dogs']
print(class_names)
img_path = dog_vs_cat_datasets["train"].imgs
print("图片路径:")
# 输出为:[('../datasets/kaggledogvscat/small\\train\\cats\\cat.0.jpg', 0), ...]
print(img_path)
# 显示图片
fig = plt.figure(figsize=(8, 3))
for i in range(batch_size):
# ax = fig.add_subplot(2, batch_size / 2, 1 + i, xticks=[], yticks=[])
ax = fig.add_subplot(2, int(batch_size / 2), 1 + i, xticks=[], yticks=[])
ax.set_title(class_names[y_train[i]])
img = x_train[i]
plt.imshow(img.permute(1, 2, 0)) # (C, H, W) --> (H, W, C)
plt.show()
if __name__ == '__main__':
main()
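ImageFolder derives the labels from the directory structure: each class lives in its own subdirectory, and classes are indexed in sorted order. The layout assumed by the code above looks roughly like this (file names are illustrative):

small/
├── train/
│   ├── cats/    # every image here gets label 0
│   └── dogs/    # every image here gets label 1
├── validation/
│   ├── cats/
│   └── dogs/
└── test/
    ├── cats/
    └── dogs/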
10. The torchtext toolkit
The torchtext toolkit helps with NLP tasks.
1) Text preprocessing
The tasks include reading the text data, splitting sentences into tokens, and building a vocabulary.
Create the file text_preprocess.py
Add the following code:
python
# -*- coding: utf-8 -*-
"""
Text preprocessing example
"""
import collections
import gzip


def read_text(filename):
    """ Open the file and return the lines read """
    with gzip.open(filename, 'rt') as f:
        lines = [line.strip().lower() for line in f]
    return lines


def tokenize(sentences):
    """ Split sentences into words """
    return [sentence.split(' ') for sentence in sentences]


def count_corpus(sentences):
    """ Return a dictionary counting word occurrences """
    tokens = [tk for st in sentences for tk in st]
    return collections.Counter(tokens)


class Vocab(object):
    """ Vocabulary class """
    def __init__(self, tokens, min_freq=0, use_special=False):
        counter = count_corpus(tokens)
        # Word frequencies
        self.token_freqs = list(counter.items())
        # Index-to-word mapping. The list stores only the words; a word's index is its position in the list
        self.idx_to_token = []
        # If use_special is True, keep all four special symbols; otherwise use only <unk>
        if use_special:
            self.pad, self.bos, self.eos, self.unk = (0, 1, 2, 3)
            self.idx_to_token += ['<pad>', '<bos>', '<eos>', '<unk>']
        else:
            self.unk = 0
            self.idx_to_token += ['<unk>']
        # Add the new words to the idx_to_token list
        self.idx_to_token += [token for token, freq in self.token_freqs
                              if freq >= min_freq and token not in self.idx_to_token]
        # Word-to-index dictionary
        self.token_to_idx = dict()
        for idx, token in enumerate(self.idx_to_token):
            self.token_to_idx[token] = idx

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]


def main():
    # Read the text data
    lines = read_text("../datasets/shakespeare.txt.gz")
    print("Total number of sentences: %d" % len(lines))
    # Tokenize
    tokens = tokenize(lines)
    print("First 2 sentences:\n", tokens[0: 2])
    # Build the vocabulary
    # vocab = Vocab(tokens, 0, True)
    vocab = Vocab(tokens)
    print("Word-to-index mapping of the first 10 entries in the vocabulary:")
    print(list(vocab.token_to_idx.items())[0: 10])
    for i in range(2):
        print("Words:", tokens[i])
        print("Indices:", vocab[tokens[i]])


if __name__ == "__main__":
    main()
Output:
Total number of sentences: 40000
First 2 sentences:
[['first', 'citizen:'], ['before', 'we', 'proceed', 'any', 'further,', 'hear', 'me', 'speak.']]
Word-to-index mapping of the first 10 entries in the vocabulary:
[('<unk>', 0), ('first', 1), ('citizen:', 2), ('before', 3), ('we', 4), ('proceed', 5), ('any', 6), ('further,', 7), ('hear', 8), ('me', 9)]
Words: ['first', 'citizen:']
Indices: [1, 2]
Words: ['before', 'we', 'proceed', 'any', 'further,', 'hear', 'me', 'speak.']
Indices: [3, 4, 5, 6, 7, 8, 9, 10]
2) Using torchtext
Use the spaCy tokenizer, which provides strong tokenization support for languages other than English.
First load the Multi30k training set and build vocabularies for the source and target languages; then set the symbol for out-of-vocabulary words to '<unk>'; finally print the first 5 source- and target-language sentences of the training set.
Tokenize and build the vocabularies with torchtext; the code file is multi30k_torchtext.py
python
"""
加载并显示Multi30k数据集
"""
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.datasets import Multi30k
# 源语言和目标语言
src_lang = 'de'
tgt_lang = 'en'
def yield_tokens(data_iter, language, tokenizer):
""" 输出符号列表的辅助函数 """
language_index = {src_lang: 0, tgt_lang: 1}
# 迭代输出符号列表
for sentence in data_iter:
yield tokenizer[language](sentence[language_index[language]])
def main():
""" 主函数 """
# 分词器和词典
tokenizer = {}
vocab = {}
# 创建源语言和目标语言的分词器,需要预装spaCy模块
tokenizer[src_lang] = get_tokenizer('spacy', language='de_core_news_sm')
tokenizer[tgt_lang] = get_tokenizer('spacy', language='en_core_web_sm')
# 定义四种特殊标记及对应索引。unk未知,pad填充,bos序列开始,eos序列结束
special_symbols = ['<unk>', '<pad>', '<bos>', '<eos>']
unk_idx, pad_idx, bos_idx, eos_idx = 0, 1, 2, 3
# 迭代源语言和目标语言以构建词典
for lang in [src_lang, tgt_lang]:
# 训练数据迭代器
train_iter = Multi30k(root='../datasets', split='train', language_pair=(src_lang, tgt_lang))
# 从迭代器中构建torchtext的Vocab对象
vocab[lang] = build_vocab_from_iterator(yield_tokens(train_iter, lang, tokenizer),
min_freq=1,
specials=special_symbols,
special_first=True)
# 设置未登录词为'<unk>'
for lang in [src_lang, tgt_lang]:
vocab[lang].set_default_index(unk_idx)
print(f"{lang}语言的词典长度:{len(vocab[lang])}")
print()
print("打印前5个源语言和目标语言的句子:")
train_iter = Multi30k(root='../datasets', split='train', language_pair=(src_lang, tgt_lang))
for i in range(5):
pair = next(train_iter)
print(pair)
for lang in [src_lang, tgt_lang]:
lang_idx = {src_lang: 0, tgt_lang: 1}
tokens = tokenizer[lang](pair[lang_idx[lang]])
# 打印分词后的单词
print(tokens)
# 打印单词的词典序号
print([vocab[lang].get_stoi()[w] for w in tokens])
print()
if __name__ == '__main__':
main()