pytorch -- torch.nn网络结构

1. 官网描述

官网

使用 torch.nn 模块,我们可以创建自定义的神经网络模型,并使用 PyTorch 提供的优化器(如 torch.optim)和损失函数来训练和优化模型。

2. 常见结构

1.卷积操作

定义:

二维卷积

1.1版本 nn.functional.conv2d

python 复制代码
torch.nn.functional.conv2d(input,
weight,
bias=None,
stride=1,
padding=0,
dilation=1, groups=1)

例子:

代码:

python 复制代码
import torch
import torch.nn.functional as F

# 5x5 single-channel "image" and a 3x3 kernel, as plain integer tensors.
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])
print(input.shape,kernel.shape)  # torch.Size([5, 5]) torch.Size([3, 3])

# F.conv2d expects (N, C, H, W) input and (out_C, in_C/groups, kH, kW) weight.
input = input.reshape(1, 1, 5, 5)
kernel = kernel.reshape(1, 1, 3, 3)

# stride 1: the window slides one cell at a time -> 3x3 output
output = F.conv2d(input,kernel,stride=1)
print(output)

# stride 2: the window jumps two cells at a time -> 2x2 output
output2 = F.conv2d(input,kernel,stride=2)
print(output2)

# padding=1 surrounds the input with one ring of zeros -> 5x5 output
output3 = F.conv2d(input,kernel,stride=1,padding=1)
print(output3)

运行:

1.2版本 torch.nn.Conv2d

python 复制代码
torch.nn.Conv2d(
in_channels,    输入通道数
out_channels,   输出通道数
kernel_size,    卷积核大小(int 或 tuple),如 3 或 (1,2)
stride=1,       卷积核移动的步长
padding=0,      边界填充(加几圈0)
dilation=1,groups=1, bias=True, padding_mode='zeros', device=None, dtype=None)

代码

python 复制代码
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# CIFAR10 test split, images converted to [0, 1] float tensors.
dataset = torchvision.datasets.CIFAR10('data',train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset,batch_size=64)

class Tudui(nn.Module):
    """Minimal model: a single 3x3 convolution mapping 3 -> 6 channels."""

    def __init__(self):
        super(Tudui,self).__init__()
        self.conv1 = nn.Conv2d(3,6,3,stride=1,padding=0)

    def forward(self,x):
        x = self.conv1(x)
        return x

tudui = Tudui()

writer = SummaryWriter('logs')
step = 0
for data in dataloader:
    imgs,targets = data
    output = tudui(imgs)
    print(output.shape)  # (64, 6, 30, 30): 32 - 3 + 1 = 30
    # input batch is (64, 3, 32, 32)
    writer.add_images('input',imgs,step)
    # add_images expects 3 channels but the conv produced 6; fold the extra
    # channels into the batch dimension purely for visualization.
    output = torch.reshape(output,(-1,3,30,30))
    writer.add_images('output', output, step)
    step+=1

# Fix: the writer was never closed, so buffered events could stay unflushed;
# close it as the other examples in this article do.
writer.close()

输出:


2.池化操作

目的:保留输入特征,减小数据量

最大池化MaxPool2d:下采样

torch.nn.MaxPool2d(

kernel_size, int/tuple 窗口

stride=None, 步长(默认值是kernel_size)

padding=0, 在外面加一圈

dilation=1, (空洞卷积)

return_indices=False,

ceil_mode=False ceil模式:向上取整

)

例子:

python 复制代码
import torch
from torch import nn

# Fix: MaxPool2d is not implemented for integer (Long) tensors — the original
# all-int literal produced RuntimeError: "max_pool2d" not implemented for
# 'Long'. Create the example input as float32 instead.
# (The unused `import torchvision.datasets` was also removed.)
input = torch.tensor([[1,2,0,3,1],
                      [0,1,2,3,1],
                      [1,2,1,0,0],
                      [5,2,3,1,1],
                      [2,1,0,1,1]], dtype=torch.float32)
# Reshape to (N, C, H, W); -1 lets reshape infer the batch size.
input = torch.reshape(input,(-1,1,5,5))
print(input.shape)

class Tudui(nn.Module):
    """Single 3x3 max pooling; ceil_mode=True keeps partial edge windows."""

    def __init__(self):
        super(Tudui,self).__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3,ceil_mode=True)

    def forward(self,input):
        # Take the max over each 3x3 window (downsampling / "subsampling").
        output = self.maxpool1(input)
        return output

tudui = Tudui()
output = tudui(input)
print(output)

输出

例子2

代码

python 复制代码
import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    """Apply one 3x3 max pool; ceil_mode=True keeps partial edge windows."""

    def __init__(self):
        super().__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        return self.maxpool1(input)


tudui = Tudui()

# CIFAR10 test split as [0, 1] float tensors.
dataset = torchvision.datasets.CIFAR10(
    'data', train=False, download=True,
    transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

# Log each input batch and its pooled (blurred, downsampled) counterpart.
writer = SummaryWriter('logs_maxpool')
step = 0
for batch in dataloader:
    imgs, targets = batch
    writer.add_images('input', imgs, step)
    pooled = tudui(imgs)
    writer.add_images('maxpool', pooled, step)
    step += 1
writer.close()

输出

3.非线性激活

python 复制代码
torch.nn.ReLU(inplace=False)
torch.nn.Sigmoid(*args, **kwargs)

代码:

python 复制代码
import torch
from torch import nn

# Small 2x2 example with both signs, reshaped to (N, C, H, W) = (1, 1, 2, 2).
input = torch.tensor([[1,-0.5],
                      [-1,3]])
input = torch.reshape(input,(-1,1,2,2))


class Tudui(nn.Module):
    """Element-wise ReLU: negatives clamp to zero, positives pass through."""

    def __init__(self):
        super().__init__()
        self.relu1 = nn.ReLU()

    def forward(self, input):
        return self.relu1(input)


tudui = Tudui()
output = tudui(input)
print(output)

运行

例子2:

代码:

python 复制代码
import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Sigmoid demo on CIFAR10 images.
dataset = torchvision.datasets.CIFAR10(
    'data', train=False, download=True,
    transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    """Element-wise sigmoid: squashes every value into (0, 1)."""

    def __init__(self):
        super().__init__()
        self.sigmoid1 = nn.Sigmoid()

    def forward(self, input):
        return self.sigmoid1(input)


tudui = Tudui()
writer = SummaryWriter('logs_sigmoid')
step = 0
for batch in dataloader:
    imgs, targets = batch
    writer.add_images('input', imgs, step)
    output = tudui(imgs)
    writer.add_images('output', output, step)
    print(output.shape)
    step += 1

writer.close()

输出


4.线性层

torch.nn.Linear(

in_features,

out_features,

bias=True,

device=None, dtype=None)

代码:

python 复制代码
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader

# drop_last=True discards the final partial batch so every batch flattens
# to exactly 64 * 3 * 32 * 32 = 196608 features.
dataset = torchvision.datasets.CIFAR10(
    'data', train=False,
    transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, 64, drop_last=True)


class Tudui(nn.Module):
    """Single fully-connected layer: 196608 inputs -> 10 outputs."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.linear1 = nn.Linear(in_features=196608, out_features=10)

    def forward(self, input):
        return self.linear1(input)


tudui = Tudui()

for batch in dataloader:
    imgs, targets = batch
    # torch.flatten(imgs) would yield torch.Size([196608]); this reshape
    # keeps four dimensions instead: torch.Size([1, 1, 1, 196608]).
    output = torch.reshape(imgs, (1, 1, 1, -1))
    print(output.shape)
    output = tudui(output)  # -> torch.Size([1, 1, 1, 10])
    print(output.shape)

5.序列化层(sequential)

例子:

代码:

python 复制代码
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    """CIFAR10-sized CNN: three (5x5 conv -> 2x2 max pool) stages, then a
    flatten and two linear layers down to 10 class scores.

    The layers are deliberately defined twice: once as individual
    attributes (conv1 ... linear2, not used by forward) and once bundled
    into an nn.Sequential, to contrast the two styles.
    """

    def __init__(self):
        super().__init__()
        # padding=2 keeps the 32x32 spatial size through each 5x5 conv
        # (out = (in + 2*p - k) / s + 1, worked out on paper).
        self.conv1 = nn.Conv2d(in_channels=3,out_channels=32,kernel_size=5,stride=1,padding=2)
        self.maxpool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(in_channels=32,out_channels=32,kernel_size=5,padding=2)
        self.maxpool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(in_channels=32,out_channels=64,kernel_size=5,padding=2)
        self.maxpool3 = nn.MaxPool2d(2)
        self.flatten = nn.Flatten()
        # 1024 = 64 channels * 4 * 4 after three halvings of 32x32;
        # 64 is the hidden width.
        self.linear1 = nn.Linear(in_features=1024,out_features=64)
        # 10 output classes.
        self.linear2 = nn.Linear(in_features=64, out_features=10)
        # The same network expressed as one Sequential pipeline.
        self.model1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(in_features=1024, out_features=64),
            nn.Linear(in_features=64, out_features=10)
        )

    def forward(self, x):
        # The Sequential runs the whole stack; the individual attributes
        # above would have to be chained one call at a time instead.
        return self.model1(x)


tudui = Tudui()
print(tudui)

# Dummy batch of 64 CIFAR-sized images to sanity-check the wiring/shapes.
input = torch.ones((64,3,32,32))
output = tudui(input)

writer= SummaryWriter('logs_s')
writer.add_graph(tudui,input)
writer.close()

输出

相关推荐
咸鱼桨12 分钟前
《庐山派从入门到...》PWM板载蜂鸣器
人工智能·windows·python·k230·庐山派
强哥之神23 分钟前
Nexa AI发布OmniAudio-2.6B:一款快速的音频语言模型,专为边缘部署设计
人工智能·深度学习·机器学习·语言模型·自然语言处理·音视频·openai
yusaisai大鱼27 分钟前
tensorflow_probability与tensorflow版本依赖关系
人工智能·python·tensorflow
18号房客27 分钟前
一个简单的深度学习模型例程,使用Keras(基于TensorFlow)构建一个卷积神经网络(CNN)来分类MNIST手写数字数据集。
人工智能·深度学习·机器学习·生成对抗网络·语言模型·自然语言处理·tensorflow
Biomamba生信基地31 分钟前
R语言基础| 功效分析
开发语言·python·r语言·医药
神秘的土鸡35 分钟前
神经网络图像隐写术:用AI隐藏信息的艺术
人工智能·深度学习·神经网络
数据分析能量站36 分钟前
神经网络-LeNet
人工智能·深度学习·神经网络·机器学习
Jaly_W44 分钟前
用于航空发动机故障诊断的深度分层排序网络
人工智能·深度学习·故障诊断·航空发动机
CodeClimb1 小时前
【华为OD-E卷-木板 100分(python、java、c++、js、c)】
java·javascript·c++·python·华为od
小嗷犬1 小时前
【论文笔记】Cross-lingual few-shot sign language recognition
论文阅读·人工智能·多模态·少样本·手语翻译