1. Regression: SSE and MSE
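For reference (notation assumed here: ŷᵢ is the prediction, yᵢ the target, N the number of samples), the two quantities computed in the cell below differ only by the factor 1/N:

$$\mathrm{SSE} = \sum_{i=1}^{N} (\hat{y}_i - y_i)^2, \qquad \mathrm{MSE} = \frac{1}{N} \sum_{i=1}^{N} (\hat{y}_i - y_i)^2 = \frac{\mathrm{SSE}}{N}$$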
```python
# MSE loss function
import torch
from torch.nn import MSELoss
yhat = torch.randn(size=(50,), dtype=torch.float32)
y = torch.randn(size=(50,), dtype=torch.float32)
criterion = MSELoss()  # default reduction is "mean"
loss1 = criterion(yhat, y)
# MSE: mean of the squared errors (reduction="mean", same as the default)
criterion = MSELoss(reduction="mean")
loss2 = criterion(yhat, y)
# SSE: sum of the squared errors (reduction="sum")
criterion = MSELoss(reduction="sum")
loss3 = criterion(yhat, y)
loss1, loss2, loss3
```
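As a quick sanity check of the relationship above, a minimal self-contained sketch (the shapes are chosen only for illustration): dividing the reduction="sum" result by the sample count should reproduce the reduction="mean" result.

```python
# Sanity check: SSE / N should equal MSE ("mean" is also the default reduction).
import torch
from torch.nn import MSELoss

yhat = torch.randn(size=(50,), dtype=torch.float32)
y = torch.randn(size=(50,), dtype=torch.float32)

mse = MSELoss(reduction="mean")(yhat, y)
sse = MSELoss(reduction="sum")(yhat, y)
print(torch.allclose(mse, sse / yhat.shape[0]))  # expected: True
```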
2. BCELoss: binary cross-entropy loss
Method 1: loss classes in the nn module
class BCEWithLogitsLoss
class BCELoss
Method 2: functions in torch.nn.functional (rarely used in practice)
function F.binary_cross_entropy_with_logits
function F.binary_cross_entropy
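The manual implementation in the next cell computes the standard binary cross-entropy, where z = Xw is the linear output and σ(z) its sigmoid (notation introduced here only for reference):

$$L = -\frac{1}{N} \sum_{i=1}^{N} \Big[ y_i \log \sigma(z_i) + (1 - y_i) \log\big(1 - \sigma(z_i)\big) \Big]$$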
```python
# Binary cross-entropy loss, Method 1: implement BCE by hand
import torch
N = 3*pow(10,3)
torch.random.manual_seed(420)
X = torch.rand((N,4),dtype=torch.float32)
w = torch.rand((4,1),dtype=torch.float32,requires_grad=True)
y = torch.randint(low=0,high=2,size=(N,1),dtype=torch.float32)
zhat = torch.mm(X,w)
sigma = torch.sigmoid(zhat)
loss = -(1/N) * torch.sum( (1-y)*torch.log(1-sigma) + y*torch.log(sigma) ) # binary cross entropy loss
loss
```
```python
# BCE loss, Method 2: loss classes from torch.nn
import torch
import torch.nn as nn
# reuses X, w, y from the previous cell
# reuses zhat and sigma = torch.sigmoid(zhat) from the previous cell
criterion = nn.BCELoss()  # no built-in sigmoid: expects probabilities, handy when sigma is already computed (e.g., to monitor accuracy)
loss1 = criterion(sigma, y)
criterion = nn.BCEWithLogitsLoss()  # sigmoid built in: expects the raw scores zhat
loss2 = criterion(zhat, y)
loss1, loss2
```
```python
# BCE loss, Method 3: functional API
import torch
from torch.nn import functional as F
# reuses X, w, y from the previous cell
# reuses zhat and sigma = torch.sigmoid(zhat) from the previous cell
loss1 = F.binary_cross_entropy(sigma, y)  # no built-in sigmoid: pass probabilities
loss2 = F.binary_cross_entropy_with_logits(zhat, y)  # sigmoid built in: pass raw scores
loss1, loss2
```
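All three routes should agree on the same inputs. A minimal self-contained cross-check (the small shapes and the seed are arbitrary illustration values, not part of the original code):

```python
# Cross-check: manual BCE, nn.BCELoss / nn.BCEWithLogitsLoss, and the
# functional API should all produce the same value on the same inputs.
import torch
import torch.nn as nn
from torch.nn import functional as F

torch.manual_seed(0)
zhat = torch.randn(8, 1)                     # raw scores (logits)
y = torch.randint(0, 2, (8, 1)).float()      # binary targets
sigma = torch.sigmoid(zhat)                  # probabilities

manual = -(y * torch.log(sigma) + (1 - y) * torch.log(1 - sigma)).mean()
cls_prob = nn.BCELoss()(sigma, y)            # expects probabilities
cls_logit = nn.BCEWithLogitsLoss()(zhat, y)  # expects logits
fn_prob = F.binary_cross_entropy(sigma, y)
fn_logit = F.binary_cross_entropy_with_logits(zhat, y)

print(torch.allclose(manual, cls_prob),
      torch.allclose(manual, cls_logit),
      torch.allclose(manual, fn_prob),
      torch.allclose(manual, fn_logit))      # expected: True True True True
```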
3. CrossEntropyLoss: multiclass cross-entropy loss
Method 1: combine LogSoftmax with NLLLoss
Method 2: call CrossEntropyLoss directly
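Both routes compute the same quantity: the average negative log of the softmax probability assigned to the true class yᵢ (with z_{i,k} denoting the score of sample i for class k):

$$L = -\frac{1}{N} \sum_{i=1}^{N} \log \frac{e^{z_{i, y_i}}}{\sum_{k} e^{z_{i, k}}}$$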
```python
# Multiclass cross-entropy loss
import torch
import torch.nn as nn
N = 3*pow(10,2)
torch.random.manual_seed(420)
X = torch.rand((N,4),dtype=torch.float32)
w = torch.rand((4,3),dtype=torch.float32,requires_grad=True)
y = torch.randint(low=0, high=3, size=(N,), dtype=torch.float32)  # w has 3 output columns, so labels are in {0, 1, 2}
```
```python
# Method 1: LogSoftmax + NLLLoss
zhat = torch.mm(X,w)
logsm = nn.LogSoftmax(dim=1)
logsigma = logsm(zhat)
criterion = nn.NLLLoss()  # expects log-probabilities and integer class indices (dtype long), not one-hot labels
loss1 = criterion(logsigma, y.long())  # cast the float labels to long before passing them in
```
```python
# Method 2: CrossEntropyLoss (fuses LogSoftmax and NLLLoss)
criterion = nn.CrossEntropyLoss()  # reduction can be "mean" (the default), "sum", or "none"
loss2 = criterion(zhat, y.long())
criterion = nn.CrossEntropyLoss(reduction="mean")  # average of the per-sample losses
loss_mean = criterion(zhat, y.long())
criterion = nn.CrossEntropyLoss(reduction="sum")   # sum of the per-sample losses
loss_sum = criterion(zhat, y.long())
criterion = nn.CrossEntropyLoss(reduction="none")  # no aggregation: returns one loss per sample
loss_none = criterion(zhat, y.long())
loss1, loss2, loss_mean, loss_sum, loss_none
```
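As a final self-contained check (shapes and seed are arbitrary illustration values), LogSoftmax + NLLLoss should match CrossEntropyLoss, and the "none" reduction should aggregate into the other two:

```python
# Cross-check: LogSoftmax + NLLLoss equals CrossEntropyLoss, and the "none"
# reduction can be aggregated into "mean" / "sum" by hand.
import torch
import torch.nn as nn

torch.manual_seed(0)
zhat = torch.randn(10, 3)                 # scores for 3 classes
y = torch.randint(0, 3, (10,))            # integer class labels (dtype long)

nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(zhat), y)
ce_mean = nn.CrossEntropyLoss(reduction="mean")(zhat, y)
ce_sum = nn.CrossEntropyLoss(reduction="sum")(zhat, y)
ce_none = nn.CrossEntropyLoss(reduction="none")(zhat, y)

print(torch.allclose(nll, ce_mean),
      torch.allclose(ce_none.mean(), ce_mean),
      torch.allclose(ce_none.sum(), ce_sum))   # expected: True True True
```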