```python
import torch
x=torch.randn(4,4)
x.size()
#torch.Size([4, 4])
y=x.view(16)
y.size()
#torch.Size([16])
z=x.view(-1,8) #the size -1 is inferred from other dimensions
z.size()
#torch.Size([2,8])
a=torch.randn(1,2,3,4)
a.size()
#torch.Size([1,2,3,4])
b=a.transpose(1,2) #swap the 2nd and 3rd dimensions
b.size()
#torch.Size([1,3,2,4])
c=a.view(1,3,2,4) #does not change the tensor's memory layout, only the shape metadata
c.size()
#torch.Size([1,3,2,4])
torch.equal(b,c)
#False: b and c have the same shape but order their elements differently,
#since transpose permutes strides while view keeps the original memory order
```
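Why `torch.equal(b, c)` comes back `False` is easier to see from strides and contiguity. The sketch below is not part of the original example; it just illustrates that `transpose` returns a view over the same storage with permuted strides, so the result is no longer contiguous and cannot be flattened with `view` until it is made contiguous (or `reshape` is used, which copies only when it has to).

```python
import torch

a = torch.randn(1, 2, 3, 4)
b = a.transpose(1, 2)                  # same storage, permuted strides
print(a.data_ptr() == b.data_ptr())    # True: transpose copies no data
print(a.stride(), b.stride())          # (24, 12, 4, 1) vs (24, 4, 12, 1)
print(b.is_contiguous())               # False: strides no longer match the shape

# b.view(24) raises a RuntimeError because b is not contiguous;
# reshape() copies only when necessary, contiguous() forces the copy up front
print(b.reshape(24).size())            # torch.Size([24])
print(b.contiguous().view(24).size())  # torch.Size([24])
```

In recent PyTorch versions, `view` also accepts a `torch.dtype` instead of target sizes: the bytes stay where they are and are simply reinterpreted as the new element type, as the next snippet shows.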
```python
import torch
x=torch.randn(4,4)
x
#tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
# [-0.1520, 0.7472, 0.5617, -0.8649],
# [-2.4724, -0.0334, -0.2976, -0.8499],
# [-0.2109, 1.9913, -0.9607, -0.6123]])
x.dtype
#torch.float32
y=x.view(torch.int32)
y
# tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
# [-1105482831, 1061112040, 1057999968, -1084397505],
# [-1071760287, -1123489973, -1097310419, -1084649136],
# [-1101533110, 1073668768, -1082790149, -1088634448]],
# dtype=torch.int32)
y[0,0]=1000000000
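# the write through y also changes x, because a dtype view shares the same storage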
x
# tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
# [-0.1520, 0.7472, 0.5617, -0.8649],
# [-2.4724, -0.0334, -0.2976, -0.8499],
# [-0.2109, 1.9913, -0.9607, -0.6123]])
x.view(torch.cfloat)
# tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
# [-0.1520+0.7472j, 0.5617-0.8649j],
# [-2.4724-0.0334j, -0.2976-0.8499j],
# [-0.2109+1.9913j, -0.9607-0.6123j]])
x.view(torch.cfloat).size()
#torch.Size([4,2])
x.view(torch.uint8)
# tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
# 8, 191],
# [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
# 93, 191],
# [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
# 89, 191],
# [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
# 28, 191]], dtype=torch.uint8)
x.view(torch.uint8).size()
#torch.Size([4,16])
```
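A dtype view never copies data: the same bytes are reinterpreted, so the last dimension scales by the ratio of element sizes (float32 is 4 bytes, complex64 is 8, uint8 is 1). The sketch below is my own summary of that bookkeeping, reusing a 4x4 float32 tensor like the one above.

```python
import torch

x = torch.randn(4, 4)            # float32: 4 bytes per element
u = x.view(torch.uint8)          # 1-byte elements -> last dim * 4 -> (4, 16)
c = x.view(torch.cfloat)         # 8-byte elements -> last dim / 2 -> (4, 2)
print(u.size(), c.size())        # torch.Size([4, 16]) torch.Size([4, 2])

# all three tensors are views over the same storage
print(x.data_ptr() == u.data_ptr() == c.data_ptr())  # True

# when the new dtype is larger, the last dimension must divide evenly and the
# tensor must be contiguous in its last dimension; for example,
# torch.randn(4, 3).view(torch.cfloat) raises a RuntimeError because
# 3 float32 values (12 bytes) do not pack into whole complex64 elements (8 bytes)
```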