Sequential, Loss Functions, Backpropagation, Optimizers, and Model Usage, Modification, Saving and Loading

Sequential & Example 1

It feels similar to Compose(): it strings the layers together so they are convenient to call.

python
def __init__(self):
    super(Net,self).__init__()
    self.model1 = Sequential(
        Conv2d(3,32,5,padding=2),
        MaxPool2d(2),
        Conv2d(32,32,5,padding=2),
        MaxPool2d(2),
        Conv2d(32,64,5,padding=2),
        MaxPool2d(2),
        Flatten(),
        Linear(1024,64),
        Linear(64,10)
    )
    
def forward(self,x):
    x = self.model1(x)
    return x
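
For comparison, a minimal sketch (my own, not from the tutorial) of the same layers written without Sequential; every layer becomes a separate attribute and forward has to call them one by one:

python
from torch import nn


class NetNoSequential(nn.Module):
    def __init__(self):
        super().__init__()
        # Same layers as above, but each one is a separate attribute
        self.conv1 = nn.Conv2d(3, 32, 5, padding=2)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 32, 5, padding=2)
        self.pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(32, 64, 5, padding=2)
        self.pool3 = nn.MaxPool2d(2)
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(1024, 64)
        self.linear2 = nn.Linear(64, 10)

    def forward(self, x):
        # With Sequential all of this is a single self.model1(x) call
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2(x))
        x = self.pool3(self.conv3(x))
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.linear2(x)
        return x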

The graph can be written out and viewed:

python
writer = SummaryWriter('../logs')
writer.add_graph(net,input)
writer.close()
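
After starting TensorBoard on the same directory (tensorboard --logdir=../logs) and opening the printed local URL, the network structure appears under the GRAPHS tab.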

Now I finally understand where the diagrams in so many papers come from. They look so authoritative.

Full code:

python
import torch
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.tensorboard import SummaryWriter


class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )
    def forward(self,x):
        x = self.model1(x)
        return x

net = Net()
print(net)
input = torch.ones((64,3,32,32))
output = net(input)
print(output.shape)

writer = SummaryWriter('../logs')
writer.add_graph(net,input)
writer.close()
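
Where Linear(1024, 64) comes from: each 5×5 convolution with padding=2 keeps the 32×32 spatial size, and each MaxPool2d(2) halves it (32 → 16 → 8 → 4), so Flatten produces 64 channels × 4 × 4 = 1024 features. The final print should therefore show torch.Size([64, 10]).
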
Loss Functions & Backpropagation

Loss functions:

python
import torch
from torch import float32
from torch.nn import L1Loss
from torch import nn

inputs = torch.tensor([1,2,3],dtype=float32)
targets = torch.tensor([1,2,5],dtype=float32)

inputs = torch.reshape(inputs,(1,1,1,3))
targets = torch.reshape(targets,(1,1,1,3))

loss = L1Loss()
result = loss(inputs,targets)
print(result)

loss = nn.MSELoss()
result = loss(inputs,targets)
print(result)

x = torch.tensor([0.1,0.2,0.3])
y = torch.tensor([1])
x = torch.reshape(x,(1,3))
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x,y)
print(result_cross)
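
As a sanity check, the three results can be reproduced by hand (a sketch of the underlying formulas with the default 'mean' reduction):

python
import torch

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

# L1Loss: mean absolute error -> (0 + 0 + 2) / 3 ≈ 0.6667
print((inputs - targets).abs().mean())

# MSELoss: mean squared error -> (0 + 0 + 4) / 3 ≈ 1.3333
print(((inputs - targets) ** 2).mean())

# CrossEntropyLoss on raw scores x with target class 1:
# loss = -x[1] + log(exp(x[0]) + exp(x[1]) + exp(x[2])) ≈ 1.1019
x = torch.tensor([0.1, 0.2, 0.3])
print(-x[1] + torch.logsumexp(x, dim=0))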

Loss function example + backpropagation (computing the gradients used to update the parameters)

python
import torch
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torchvision.datasets import ImageFolder

# Data preprocessing
transform = transforms.Compose([
    transforms.Resize((32,32)),
    transforms.ToTensor(),
    transforms.Normalize(
        mean = [0.5,0.5,0.5],
        std = [0.5,0.5,0.5]
    )
])

# Load the dataset
folder_path = '../images'
dataset = ImageFolder(folder_path,transform=transform)
dataloader = DataLoader(dataset,batch_size=1)

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )
    def forward(self,x):
        x = self.model1(x)
        return x

net = Net()
loss = nn.CrossEntropyLoss()

for data in dataloader:
    img,label = data
    print(img.shape)
    output = net(img)
    result_loss = loss(output,label)
    print(result_loss)
    result_loss.backward()
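
After backward() each parameter's .grad has been filled in; a small sketch (my own addition) that checks this by indexing the first Conv2d inside the Sequential:

python
# None before the first backward(); afterwards a tensor shaped like the weight
print(net.model1[0].weight.grad.shape)  # torch.Size([32, 3, 5, 5])
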
Optimizers

Stochastic gradient descent (SGD)

python
import torch
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torchvision.datasets import ImageFolder

# Data preprocessing
transform = transforms.Compose([
    transforms.Resize((32,32)),
    transforms.ToTensor(),
    transforms.Normalize(
        mean = [0.5,0.5,0.5],
        std = [0.5,0.5,0.5]
    )
])

# Load the dataset
folder_path = '../images'
dataset = ImageFolder(folder_path,transform=transform)
dataloader = DataLoader(dataset,batch_size=1)

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )
    def forward(self,x):
        x = self.model1(x)
        return x

net = Net()
loss = nn.CrossEntropyLoss()
optim = torch.optim.SGD(net.parameters(),lr=0.01)
for epoch in range(10):
    running_loss = 0.0
    for data in dataloader:
        img, label = data
        output = net(img)
        result_loss = loss(output, label)
        optim.zero_grad()
        result_loss.backward()
        optim.step()
        # Accumulate the loss over the whole epoch; .item() takes the Python
        # number so the autograd graph is not kept alive across iterations
        running_loss += result_loss.item()
    print(running_loss)
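
SGD can be swapped for any other optimizer in torch.optim without touching the rest of the loop. A hedged sketch using Adam plus a step-wise learning-rate decay (the lr and schedule values here are arbitrary choices of mine, not from the tutorial):

python
optim = torch.optim.Adam(net.parameters(), lr=1e-3)
# Multiply the learning rate by 0.1 every 5 epochs
scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=5, gamma=0.1)

for epoch in range(10):
    for data in dataloader:
        img, label = data
        output = net(img)
        result_loss = loss(output, label)
        optim.zero_grad()
        result_loss.backward()
        optim.step()
    # Step the scheduler once per epoch, after the optimizer updates
    scheduler.step()
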
Using and Modifying Existing Models

Maybe this could be called transfer learning???

example net: vgg16

python
import torchvision
from torch import nn

# Without pretrained weights
vgg16_false = torchvision.models.vgg16(pretrained=False)
# With pretrained weights
vgg16_true = torchvision.models.vgg16(pretrained=True)

print(vgg16_true)

# Add a module: append an extra Linear layer inside vgg16's classifier
vgg16_true.classifier.add_module('add_linear',nn.Linear(1000,10))
# Modify an existing module inside the model
vgg16_false.classifier[6] = nn.Linear(4096,10)
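
In the transfer-learning spirit mentioned above, a common next step is to freeze the pretrained convolutional part and train only the new head; a minimal sketch (my own, not from the tutorial):

python
# Freeze the pretrained feature extractor
for param in vgg16_true.features.parameters():
    param.requires_grad = False

# Hand only the still-trainable classifier parameters to the optimizer
optimizer = torch.optim.SGD(
    (p for p in vgg16_true.parameters() if p.requires_grad),
    lr=0.01,
)
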
Saving and Loading Models
  1. Existing model: vgg16

    python
    vgg16 = torchvision.models.vgg16(pretrained=False)
    python
    # Save: model structure + parameters
    torch.save(vgg16,"vgg16_method1.pth")
    # Load:
    model = torch.load("vgg16_method1.pth")
    python
    # Save: model parameters only (officially recommended)
    # Stored in dictionary form (state dict)
    torch.save(vgg16.state_dict(),"vgg16_method2.pth")
    # Load
    vgg16 = torchvision.models.vgg16(pretrained=False)
    vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
  2. Custom model

    python
    # Save
    class Net(nn.Module):
        ...
        ...
    net = Net()
    torch.save(net,"net.pth")
    python
    # Load
    class Net(nn.Module):
        ...
        ...
    model = torch.load("net.pth")
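
Note that method 1 for a custom model only works if the Net class definition is importable at the time torch.load is called. The state_dict approach also works for custom models and avoids that coupling; a short sketch reusing the Net class from above (the filename is just an example):

python
# Save only the parameters of the custom model
torch.save(net.state_dict(), "net_method2.pth")

# Load: rebuild the model first, then restore its parameters
model = Net()
model.load_state_dict(torch.load("net_method2.pth"))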