Used to constrain image generation, serving as a loss term.
Differentiable, so it can be optimized with gradients.
- pytorch structural similarity (SSIM) loss https://github.com/Po-Hsun-Su/pytorch-ssim
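  A minimal usage sketch of the SSIM loss from the repo above (assuming `pytorch_ssim` is installed from that repository; `1 - SSIM` is minimized so that more similar images give a lower loss):

```python
import torch
import pytorch_ssim  # from the Po-Hsun-Su/pytorch-ssim repo linked above

# e.g. a generated image (being optimized) and its reference
img1 = torch.rand(1, 1, 256, 256, requires_grad=True)
img2 = torch.rand(1, 1, 256, 256)

ssim_module = pytorch_ssim.SSIM(window_size=11)
loss = 1 - ssim_module(img1, img2)  # SSIM is high for similar images, so minimize 1 - SSIM
loss.backward()                     # differentiable, so it can constrain a generator
```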
- Contrastive loss from a siamese-network notebook: https://github.com/harveyslash/Facial-Similarity-with-Siamese-Networks-in-Pytorch/blob/master/Siamese-networks-medium.ipynb
```python
import torch
import torch.nn.functional as F


class ContrastiveLoss(torch.nn.Module):
    """
    Contrastive loss function.
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """
    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        # label == 0 for similar pairs, label == 1 for dissimilar pairs
        euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True)
        loss_contrastive = torch.mean((1 - label) * torch.pow(euclidean_distance, 2) +
                                      label * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
        return loss_contrastive
```
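A quick usage sketch of the contrastive loss (labels follow the convention in the formula above: 0 = similar pair, 1 = dissimilar pair):

```python
criterion = ContrastiveLoss(margin=2.0)
emb1 = torch.randn(8, 128)                   # embeddings from one siamese branch
emb2 = torch.randn(8, 128)                   # embeddings from the other branch
label = torch.randint(0, 2, (8, 1)).float()  # 0 = same pair, 1 = different pair
loss = criterion(emb1, emb2, label)
```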
- For the multi-set case (several positives/negatives per anchor), see the formulation in Multi-Similarity Loss for Deep Metric Learning (MS-Loss)
- Reference: https://blog.csdn.net/m0_46204224/article/details/117997854
```python
import torch
from torch import nn


# @LOSS.register('ms_loss')  # decorator from the original project's loss registry; re-enable when that registry is available
class MultiSimilarityLoss(nn.Module):
    def __init__(self, cfg):
        super(MultiSimilarityLoss, self).__init__()
        self.thresh = 0.5
        self.margin = 0.1
        self.scale_pos = cfg.LOSSES.MULTI_SIMILARITY_LOSS.SCALE_POS
        self.scale_neg = cfg.LOSSES.MULTI_SIMILARITY_LOSS.SCALE_NEG

    def forward(self, feats, labels):
        assert feats.size(0) == labels.size(0), \
            f"feats.size(0): {feats.size(0)} is not equal to labels.size(0): {labels.size(0)}"
        batch_size = feats.size(0)
        sim_mat = torch.matmul(feats, torch.t(feats))

        epsilon = 1e-5
        loss = list()

        for i in range(batch_size):
            # similarities of anchor i to same-class / different-class samples
            pos_pair_ = sim_mat[i][labels == labels[i]]
            pos_pair_ = pos_pair_[pos_pair_ < 1 - epsilon]  # drop the anchor itself
            neg_pair_ = sim_mat[i][labels != labels[i]]
            if len(pos_pair_) < 1 or len(neg_pair_) < 1:
                continue

            # pair mining: keep only informative pairs within the margin
            neg_pair = neg_pair_[neg_pair_ + self.margin > min(pos_pair_)]
            pos_pair = pos_pair_[pos_pair_ - self.margin < max(neg_pair_)]

            if len(neg_pair) < 1 or len(pos_pair) < 1:
                continue

            # weighting step (soft weighting via log-sum-exp)
            pos_loss = 1.0 / self.scale_pos * torch.log(
                1 + torch.sum(torch.exp(-self.scale_pos * (pos_pair - self.thresh))))
            neg_loss = 1.0 / self.scale_neg * torch.log(
                1 + torch.sum(torch.exp(self.scale_neg * (neg_pair - self.thresh))))
            loss.append(pos_loss + neg_loss)

        if len(loss) == 0:
            return torch.zeros([], requires_grad=True, device=feats.device)

        loss = sum(loss) / batch_size
        return loss
```
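This version reads its scales from a project config; a minimal stand-in config (hypothetical, built with `SimpleNamespace`; the scale values mirror the standalone implementation further down) for trying it out:

```python
import torch
from torch import nn
from types import SimpleNamespace

cfg = SimpleNamespace(
    LOSSES=SimpleNamespace(
        MULTI_SIMILARITY_LOSS=SimpleNamespace(SCALE_POS=2.0, SCALE_NEG=40.0)))

criterion = MultiSimilarityLoss(cfg)
feats = nn.functional.normalize(torch.randn(16, 256), dim=1)  # cosine similarity assumes unit-norm features
labels = torch.randint(0, 4, (16,))
loss = criterion(feats, labels)
```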
- Recall@k Surrogate Loss with Large Batches and Similarity Mixup https://github.com/yash0307/RecallatK_surrogate
```python
import torch


def sigmoid(tensor, temp=1.0):
    # temperature-scaled sigmoid for the smooth rank estimate
    # (the linked repository defines an equivalent helper; added here so the snippet is self-contained)
    exponent = -tensor / temp
    exponent = torch.clamp(exponent, min=-50, max=50)
    return 1.0 / (1.0 + torch.exp(exponent))


class RecallatK(torch.nn.Module):
    def __init__(self, anneal, batch_size, num_id, feat_dims, k_vals, k_temperatures, mixup):
        super(RecallatK, self).__init__()
        assert batch_size % num_id == 0
        self.anneal = anneal
        self.batch_size = batch_size
        self.num_id = num_id
        self.feat_dims = feat_dims
        self.k_vals = [min(batch_size, k) for k in k_vals]
        self.k_temperatures = k_temperatures
        self.mixup = mixup
        self.samples_per_class = int(batch_size / num_id)

    def forward(self, preds, q_id):
        batch_size = preds.shape[0]
        num_id = self.num_id
        anneal = self.anneal
        feat_dims = self.feat_dims
        k_vals = self.k_vals
        k_temperatures = self.k_temperatures
        samples_per_class = int(batch_size / num_id)
        norm_vals = torch.Tensor([min(k, (samples_per_class - 1)) for k in k_vals]).cuda()
        group_num = int(q_id / samples_per_class)
        q_id_ = group_num * samples_per_class
        # cosine similarities between the query and every sample in the batch
        sim_all = (preds[q_id] * preds).sum(1)
        sim_all_g = sim_all.view(num_id, int(batch_size / num_id))
        sim_diff_all = sim_all.unsqueeze(-1) - sim_all_g[group_num, :].unsqueeze(0).repeat(batch_size, 1)
        # sigmoid relaxation of the pairwise ranking indicator
        sim_sg = sigmoid(sim_diff_all, temp=anneal)
        for i in range(samples_per_class):
            sim_sg[group_num * samples_per_class + i, i] = 0.
        sim_all_rk = (1.0 + torch.sum(sim_sg, dim=0)).unsqueeze(dim=0)
        sim_all_rk[:, q_id % samples_per_class] = 0.
        sim_all_rk = sim_all_rk.unsqueeze(dim=-1).repeat(1, 1, len(k_vals))
        k_vals = torch.Tensor(k_vals).cuda()
        k_vals = k_vals.unsqueeze(dim=0).unsqueeze(dim=0).repeat(1, samples_per_class, 1)
        sim_all_rk = k_vals - sim_all_rk
        for given_k in range(0, len(self.k_vals)):
            sim_all_rk[:, :, given_k] = sigmoid(sim_all_rk[:, :, given_k], temp=float(k_temperatures[given_k]))
        sim_all_rk[:, q_id % samples_per_class, :] = 0.
        k_vals_loss = torch.Tensor(self.k_vals).cuda()
        k_vals_loss = k_vals_loss.unsqueeze(dim=0)
        recall = torch.sum(sim_all_rk, dim=1)
        recall = torch.minimum(recall, k_vals_loss)
        recall = torch.sum(recall, dim=0)
        recall = torch.div(recall, norm_vals)
        recall = torch.sum(recall) / len(self.k_vals)
        return (1. - recall) / batch_size
```
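A usage sketch, assuming the batch is arranged as `num_id` consecutive groups of `batch_size / num_id` same-class samples, embeddings are L2-normalized (so the dot products are cosine similarities), and a GPU is available (the loss calls `.cuda()` internally); the hyper-parameter values are only illustrative:

```python
import torch
import torch.nn.functional as F

batch_size, num_id, feat_dims = 16, 4, 128
criterion = RecallatK(anneal=0.01, batch_size=batch_size, num_id=num_id, feat_dims=feat_dims,
                      k_vals=[1, 2, 4], k_temperatures=[1.0, 1.0, 1.0], mixup=False)

preds = F.normalize(torch.randn(batch_size, feat_dims), dim=1).cuda()
# average the surrogate loss over every sample in the batch acting as the query
loss = sum(criterion(preds, q) for q in range(batch_size)) / batch_size
```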
- Circle Loss: https://github.com/TinyZeaMays/CircleLoss/blob/master/circle_loss.py
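  A minimal sketch following the Circle Loss formulation from the paper (not necessarily the repo's exact code): given an anchor's similarities `sp` to its positives and `sn` to its negatives, each pair gets an adaptive weight and the loss is a softplus over the log-sum-exp of both sets. The linked repo also provides a helper for turning a labelled batch of normalized features into such pairs.

```python
import torch
from torch import nn


class CircleLoss(nn.Module):
    def __init__(self, m: float = 0.25, gamma: float = 80.0):  # illustrative defaults
        super().__init__()
        self.m = m          # relaxation margin
        self.gamma = gamma  # scale factor
        self.soft_plus = nn.Softplus()

    def forward(self, sp: torch.Tensor, sn: torch.Tensor) -> torch.Tensor:
        # adaptive weights (treated as constants, hence detach): harder pairs get larger weights
        ap = torch.clamp_min(-sp.detach() + 1 + self.m, 0.0)
        an = torch.clamp_min(sn.detach() + self.m, 0.0)
        delta_p = 1 - self.m
        delta_n = self.m
        logit_p = -ap * (sp - delta_p) * self.gamma
        logit_n = an * (sn - delta_n) * self.gamma
        # softplus(logsumexp_n + logsumexp_p) = log(1 + sum_n exp(logit_n) * sum_p exp(logit_p))
        return self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0))
```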
- Official PyTorch loss functions: https://pytorch.org/docs/1.12/nn.functional.html#loss-functions
- Hard triplet loss (hard positive/negative mining)
```python
from __future__ import absolute_import

import torch
from torch import nn

DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class TripletLoss(nn.Module):
    """Triplet loss with hard positive/negative mining.

    Reference:
    Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.
    Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.

    Args:
        margin (float): margin for triplet.
    """
    def __init__(self, margin=0.3):  # margin threshold of the triplet loss
        super(TripletLoss, self).__init__()
        self.margin = margin
        # MarginRankingLoss(an, ap, y) = mean(max(0, -y * (an - ap) + margin));
        # with y = 1 this is max(0, ap - an + margin), the standard triplet hinge
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, targets):
        """
        Args:
            inputs: feature matrix with shape (batch_size, feat_dim), e.g. 32 x 2048
            targets: ground-truth ID labels with shape (batch_size), e.g. 32 person IDs
        """
        n = inputs.size(0)  # batch size
        # Compute the pairwise squared-distance matrix via (a - b)^2 = a^2 + b^2 - 2ab
        dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)  # n x n distance matrix (current addmm_ signature)
        dist = dist.clamp(min=1e-12).sqrt()  # clamp for numerical stability: sqrt at 0 has a bad gradient
        # For each anchor, find the hardest positive and negative:
        # mask[i][j] == True when samples i and j share the same identity
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            # hardest positive: largest distance among samples with the same ID
            dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
            # hardest negative: smallest distance among samples with a different ID
            dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)
        # Compute ranking hinge loss: push dist_an above dist_ap by at least the margin
        y = torch.ones_like(dist_an)  # y = 1 means dist_an should rank higher than dist_ap
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return loss


class MultiSimilarityLoss(nn.Module):
    def __init__(self, margin=0.7):
        super(MultiSimilarityLoss, self).__init__()
        self.thresh = 0.5
        self.margin = margin
        self.scale_pos = 2.0
        self.scale_neg = 40.0

    def forward(self, feats, labels):
        assert feats.size(0) == labels.size(0), \
            f"feats.size(0): {feats.size(0)} is not equal to labels.size(0): {labels.size(0)}"
        batch_size = feats.size(0)
        feats = nn.functional.normalize(feats, p=2, dim=1)

        # Shape: batch_size x batch_size cosine-similarity matrix
        sim_mat = torch.matmul(feats, torch.t(feats))

        epsilon = 1e-5
        loss = list()
        mask = labels.expand(batch_size, batch_size).eq(
            labels.expand(batch_size, batch_size).t())

        for i in range(batch_size):
            pos_pair_ = sim_mat[i][mask[i]]
            pos_pair_ = pos_pair_[pos_pair_ < 1 - epsilon]  # drop the anchor itself
            neg_pair_ = sim_mat[i][mask[i] == 0]
            if len(pos_pair_) < 1 or len(neg_pair_) < 1:
                continue

            # pair mining: keep only informative pairs within the margin
            neg_pair = neg_pair_[neg_pair_ + self.margin > min(pos_pair_)]
            pos_pair = pos_pair_[pos_pair_ - self.margin < max(neg_pair_)]

            if len(neg_pair) < 1 or len(pos_pair) < 1:
                continue

            # weighting step
            pos_loss = 1.0 / self.scale_pos * torch.log(
                1 + torch.sum(torch.exp(-self.scale_pos * (pos_pair - self.thresh))))
            neg_loss = 1.0 / self.scale_neg * torch.log(
                1 + torch.sum(torch.exp(self.scale_neg * (neg_pair - self.thresh))))
            loss.append(pos_loss + neg_loss)

        if len(loss) == 0:
            return torch.zeros([], requires_grad=True, device=feats.device)

        loss = sum(loss) / batch_size
        return loss


if __name__ == '__main__':
    # quick test of TripletLoss
    use_gpu = False
    model = TripletLoss()
    features = torch.rand(32, 2048)
    label = torch.Tensor([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
                          5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8]).long()
    loss = model(features, label)
    print(loss)
```
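The standalone MultiSimilarityLoss above can be exercised with the same random batch (a quick sketch appended to the test block):

```python
# quick test of the standalone MultiSimilarityLoss, reusing features / label from the TripletLoss test
ms_criterion = MultiSimilarityLoss()
print(ms_criterion(features, label))
```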