pytorch -- torch.nn网络结构

1. 官网描述

官网

使用 torch.nn 模块,我们可以创建自定义的神经网络模型,并使用 PyTorch 提供的优化器(如 torch.optim)和损失函数来训练和优化模型。

2. 常见结构

1.卷积操作

定义:

二维卷积

1.1版本 nn.functional.conv2d

python 复制代码
torch.nn.functional.conv2d(input,
weight,
bias=None,
stride=1,
padding=0,
dilation=1, groups=1)

例子:

代码:

python 复制代码
import torch
import torch.nn.functional as F

# Toy example: run the functional conv2d on a hand-written 5x5 matrix
# with a 3x3 kernel, showing the effect of stride and padding.
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])
print(input.shape, kernel.shape)  # torch.Size([5, 5]) torch.Size([3, 3])

# conv2d expects 4-D tensors: (batch, channels, height, width).
input = input.reshape(1, 1, 5, 5)
kernel = kernel.reshape(1, 1, 3, 3)

# stride 1 -> 3x3 output
output = F.conv2d(input, kernel, stride=1)
print(output)

# stride 2 -> 2x2 output
output2 = F.conv2d(input, kernel, stride=2)
print(output2)

# padding=1 surrounds the input with one ring of zeros -> 5x5 output
output3 = F.conv2d(input, kernel, stride=1, padding=1)
print(output3)

运行:

1.2版本 torch.nn.Conv2d

python 复制代码
torch.nn.Conv2d(
in_channels,    输入通道数
out_channels,   输出通道数
kernel_size,    卷积核大小(int 或 tuple),例如 3 或 (1,2)
stride=1,       卷积核移动的步长
padding=0,      边界填充(加几圈0)
dilation=1,groups=1, bias=True, padding_mode='zeros', device=None, dtype=None)

代码

python 复制代码
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# CIFAR-10 test split converted to tensors (downloads on first run).
dataset = torchvision.datasets.CIFAR10('data', train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    """Minimal model: a single 3->6 channel, 3x3 convolution."""

    def __init__(self):
        super(Tudui, self).__init__()
        # 32x32 input, kernel 3, stride 1, no padding -> 30x30 output.
        self.conv1 = nn.Conv2d(3, 6, 3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x


tudui = Tudui()

writer = SummaryWriter('logs')
step = 0
for data in dataloader:
    imgs, targets = data
    output = tudui(imgs)
    print(output.shape)  # torch.Size([64, 6, 30, 30])
    writer.add_images('input', imgs, step)
    # add_images expects 3-channel images, so fold the 6 output channels
    # back into extra batch entries of 3-channel 30x30 images for display.
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images('output', output, step)
    step += 1
# BUG FIX: the original never closed the writer, so buffered TensorBoard
# events could be lost; close() flushes them to disk.
writer.close()

输出:


2.池化操作

目的:保留输入特征,减小数据量

最大池化MaxPool2d:下采样

torch.nn.MaxPool2d(

kernel_size, int/tuple 窗口

stride=None, 步长(默认值是kernel_size)

padding=0, 在外面加一圈

dilation=1, (空洞卷积)

return_indices=False,

ceil_mode=False ceil模式:向上取整

)

例子:

python 复制代码
import torch
from torch import nn

# BUG FIX: nn.MaxPool2d is not implemented for integer (Long) tensors on
# CPU — the original default-dtype input raises
# RuntimeError: "max_pool2d" not implemented for 'Long'.
# Build the demo matrix as float32 instead. (The unused torchvision
# import was also dropped; this snippet never touches it.)
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
# Pooling expects (N, C, H, W); -1 lets reshape infer the batch size.
input = torch.reshape(input, (-1, 1, 5, 5))
print(input.shape)


class Tudui(nn.Module):
    """Single 3x3 max-pool; ceil_mode=True keeps partial border windows."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output


tudui = Tudui()
output = tudui(input)
print(output)

输出

例子2

代码

python 复制代码
import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    """Applies a single 3x3 max-pool (ceil_mode=True) to its input."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        return self.maxpool1(input)


tudui = Tudui()

# CIFAR-10 test split as tensors (downloads on first run).
dataset = torchvision.datasets.CIFAR10('data', train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

# Log each batch before and after pooling so the down-sampling is
# visible side by side in TensorBoard.
writer = SummaryWriter('logs_maxpool')
for step, (imgs, targets) in enumerate(dataloader):
    writer.add_images('input', imgs, step)
    writer.add_images('maxpool', tudui(imgs), step)
writer.close()

输出

3.非线性激活

python 复制代码
torch.nn.ReLU(inplace=False)
torch.nn.Sigmoid(*args, **kwargs)

代码:

python 复制代码
import torch
from torch import nn

# 2x2 example with negative entries to show what ReLU zeroes out.
input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))


class Tudui(nn.Module):
    """Identity network apart from an element-wise ReLU."""

    def __init__(self):
        super().__init__()
        self.relu1 = nn.ReLU()

    def forward(self, input):
        return self.relu1(input)


tudui = Tudui()
output = tudui(input)
print(output)

运行

例子2:

代码:

python 复制代码
import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Visualize the effect of an element-wise sigmoid on CIFAR-10 batches.
dataset = torchvision.datasets.CIFAR10('data', train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    """Identity network apart from an element-wise sigmoid."""

    def __init__(self):
        super().__init__()
        self.sigmoid1 = nn.Sigmoid()

    def forward(self, input):
        return self.sigmoid1(input)


tudui = Tudui()
writer = SummaryWriter('logs_sigmoid')
for step, (imgs, targets) in enumerate(dataloader):
    writer.add_images('input', imgs, step)
    squashed = tudui(imgs)
    writer.add_images('output', squashed, step)
    print(squashed.shape)

writer.close()

输出


4.线性层

torch.nn.Linear(

in_features,

out_features,

bias=True,

device=None, dtype=None)

代码:

python 复制代码
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10('data', train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=True)
# drop_last=True: a partial final batch would not flatten to 196608 features.
dataloader = DataLoader(dataset, 64, drop_last=True)


class Tudui(nn.Module):
    """One fully-connected layer: 64*3*32*32 = 196608 inputs -> 10 outputs."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.linear1 = nn.Linear(in_features=196608, out_features=10)

    def forward(self, input):
        return self.linear1(input)


tudui = Tudui()

for imgs, targets in dataloader:
    # Flatten the whole 64-image batch into one long feature vector,
    # shaped (1, 1, 1, 196608) — a reshape stand-in for torch.flatten.
    flat = torch.reshape(imgs, (1, 1, 1, -1))
    print(flat.shape)          # torch.Size([1, 1, 1, 196608])
    print(tudui(flat).shape)   # torch.Size([1, 1, 1, 10])

5.序列化层(Sequential)

例子:

代码:

python 复制代码
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    """CIFAR-10 classifier: three conv+pool stages, flatten, two linears.

    The architecture is deliberately declared twice — once as individual
    attributes and once inside an nn.Sequential — to compare both styles;
    forward() only runs the Sequential version.
    """

    def __init__(self):
        super().__init__()
        # Individual layers (kept for comparison with the Sequential form).
        # padding=2 keeps each 5x5 conv size-preserving on 32x32 inputs.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.maxpool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2)
        self.maxpool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2)
        self.maxpool3 = nn.MaxPool2d(2)
        self.flatten = nn.Flatten()
        # 64 hidden units; 1024 = 64 channels * 4 * 4 after three pools.
        self.linear1 = nn.Linear(in_features=1024, out_features=64)
        # 10 output classes.
        self.linear2 = nn.Linear(in_features=64, out_features=10)
        # Same pipeline expressed as a single Sequential container.
        self.model1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(in_features=1024, out_features=64),
            nn.Linear(in_features=64, out_features=10)
        )

    def forward(self, x):
        # The Sequential runs the whole pipeline in one call.
        return self.model1(x)


tudui = Tudui()
print(tudui)

# Sanity-check the architecture with a dummy batch of 64 CIFAR-sized images.
input = torch.ones((64, 3, 32, 32))
output = tudui(input)

writer = SummaryWriter('logs_s')
writer.add_graph(tudui, input)
writer.close()

输出

相关推荐
DO_Community3 小时前
普通服务器都能跑:深入了解 Qwen3-Next-80B-A3B-Instruct
人工智能·开源·llm·大语言模型·qwen
WWZZ20253 小时前
快速上手大模型:机器学习3(多元线性回归及梯度、向量化、正规方程)
人工智能·算法·机器学习·机器人·slam·具身感知
deephub3 小时前
深入BERT内核:用数学解密掩码语言模型的工作原理
人工智能·深度学习·语言模型·bert·transformer
PKNLP3 小时前
BERT系列模型
人工智能·深度学习·bert
应用市场4 小时前
构建自定义命令行工具 - 打造专属指令体
开发语言·windows·python
兰亭妙微4 小时前
ui设计公司审美积累 | 金融人工智能与用户体验 用户界面仪表盘设计
人工智能·金融·ux
东方佑4 小时前
从字符串中提取重复子串的Python算法解析
windows·python·算法
IT_Octopus4 小时前
triton backend 模式docker 部署 pytorch gpu模型 镜像选择
pytorch·docker·triton·模型推理
AKAMAI4 小时前
安全风暴的绝地反击 :从告警地狱到智能防护
运维·人工智能·云计算
岁月宁静4 小时前
深度定制:在 Vue 3.5 应用中集成流式 AI 写作助手的实践
前端·vue.js·人工智能