rescorediff

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from einops import rearrange


# Sinusoidal (Transformer/DDPM-style) timestep embedding.
class SinusoidalPosEmb(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        device = x.device
        half_dim = self.dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
        emb = x[:, None] * emb[None, :]
        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
        return emb
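
# Illustrative shape check (an addition, not from the original post): with
# dim=32, a batch of timesteps maps to a (batch, 32) embedding, half sines and
# half cosines, matching what UNet.time_mlp below expects as input.
_pos_emb_demo = SinusoidalPosEmb(32)(torch.arange(8))
assert _pos_emb_demo.shape == (8, 32)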


# diff
class ResidualConv(nn.Module):  # diff
    def __init__(self, in_ch, out_ch):  # diff
        super(ResidualConv, self).__init__()  # diff
        self.conv = nn.Sequential(  # diff
            nn.Conv2d(in_ch, out_ch, 3, padding=1),  # diff
            nn.ReLU(inplace=True),  # diff
            nn.Conv2d(out_ch, out_ch, 3, padding=1),  # diff
            nn.ReLU(inplace=True)  # diff
        )  # diff
        self.shortcut = nn.Conv2d(in_ch, out_ch, 1)  # diff

    def forward(self, x):  # diff
        return self.conv(x) + self.shortcut(x)  # diff
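
# Illustrative shape check (an addition): the 1x1 shortcut lets the residual
# block change the channel count, e.g. 64 -> 128, without changing spatial size.
_res_demo = ResidualConv(64, 128)(torch.randn(1, 64, 32, 32))
assert _res_demo.shape == (1, 128, 32, 32)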


# Decoder upsampling block: transposed-conv upsample, pad to the skip tensor's
# size, then fuse by addition instead of concatenation.
class up(nn.Module):
    def __init__(self, in_ch):
        super(up, self).__init__()
        self.up = nn.ConvTranspose2d(in_ch, in_ch // 2, 2, stride=2)

    def forward(self, x1, x2):
        x1 = self.up(x1)

        # input is CHW
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]

        x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2))

        x = x2 + x1
        return x


class outconv(nn.Module):
    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        x = self.conv(x)
        return x


# EMM module: predicts per-channel scale (gamma) and shift (beta) from the
# 2-channel condition input.
class adjust_net(nn.Module):
    def __init__(self, out_channels=64, middle_channels=32):
        super(adjust_net, self).__init__()

        self.model = nn.Sequential(
            nn.Conv2d(2, middle_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(2),

            nn.Conv2d(middle_channels, middle_channels * 2, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(2),

            nn.Conv2d(middle_channels * 2, middle_channels * 4, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(2),

            nn.Conv2d(middle_channels * 4, out_channels * 2, 1, padding=0)
        )

    def forward(self, x):
        out = self.model(x)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out1 = out[:, :out.shape[1] // 2]
        out2 = out[:, out.shape[1] // 2:]
        return out1, out2
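
# Illustrative check (an addition): from a 2-channel condition image,
# adjust_net yields per-channel scale and shift tensors of shape
# (B, out_channels, 1, 1) that broadcast over the U-Net feature maps.
_gamma, _beta = adjust_net(64)(torch.randn(4, 2, 64, 64))
assert _gamma.shape == (4, 64, 1, 1) and _beta.shape == (4, 64, 1, 1)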


# The U-Net architecture follows "Toward Convolutional Blind Denoising of Real Photographs" (CBDNet),
# official MATLAB implementation: https://github.com/GuoShi28/CBDNet,
# unofficial PyTorch implementation: https://github.com/IDKiro/CBDNet-pytorch/tree/master.
# We improve it by adding a timestep embedding and the EMM module, and by removing the noise estimation network.
class UNet(nn.Module):
    def __init__(self, in_channels=2, out_channels=1):
        super(UNet, self).__init__()

        dim = 32
        self.time_mlp = nn.Sequential(
            SinusoidalPosEmb(dim),
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, dim)
        )

        self.inc = nn.Sequential(
            ResidualConv(in_channels, 64),  # diff
            ResidualConv(64, 64)  # diff
        )

        self.down1 = nn.AvgPool2d(2)
        self.mlp1 = nn.Sequential(
            nn.GELU(),
            nn.Linear(dim, 64)
        )
        self.adjust1 = adjust_net(64)
        self.conv1 = nn.Sequential(
            ResidualConv(64, 128),  # diff
            ResidualConv(128, 128),  # diff
            ResidualConv(128, 128)  # diff
        )

        self.down2 = nn.AvgPool2d(2)
        self.mlp2 = nn.Sequential(
            nn.GELU(),
            nn.Linear(dim, 128)
        )
        self.adjust2 = adjust_net(128)
        self.conv2 = nn.Sequential(
            ResidualConv(128, 256),  # diff
            ResidualConv(256, 256),  # diff
            ResidualConv(256, 256),  # diff
            ResidualConv(256, 256),  # diff
            ResidualConv(256, 256),  # diff
            ResidualConv(256, 256)  # diff
        )

        self.up1 = up(256)
        self.mlp3 = nn.Sequential(
            nn.GELU(),
            nn.Linear(dim, 128)
        )
        self.adjust3 = adjust_net(128)
        self.conv3 = nn.Sequential(
            ResidualConv(128, 128),  # diff
            ResidualConv(128, 128),  # diff
            ResidualConv(128, 128)  # diff
        )

        self.up2 = up(128)
        self.mlp4 = nn.Sequential(
            nn.GELU(),
            nn.Linear(dim, 64)
        )
        self.adjust4 = adjust_net(64)
        self.conv4 = nn.Sequential(
            ResidualConv(64, 64),  # diff
            ResidualConv(64, 64)  # diff
        )

        self.outc = outconv(64, out_channels)

    def forward(self, x, t, x_adjust, adjust):
        inx = self.inc(x)
        time_emb = self.time_mlp(t)
        down1 = self.down1(inx)
        condition1 = self.mlp1(time_emb)
        condition1 = rearrange(condition1, 'b c -> b c 1 1')
        if adjust:
            gamma1, beta1 = self.adjust1(x_adjust)
            down1 = down1 + gamma1 * condition1 + beta1
        else:
            down1 = down1 + condition1
        conv1 = self.conv1(down1)

        down2 = self.down2(conv1)
        condition2 = self.mlp2(time_emb)
        condition2 = rearrange(condition2, 'b c -> b c 1 1')
        if adjust:
            gamma2, beta2 = self.adjust2(x_adjust)
            down2 = down2 + gamma2 * condition2 + beta2
        else:
            down2 = down2 + condition2
        conv2 = self.conv2(down2)

        up1 = self.up1(conv2, conv1)
        condition3 = self.mlp3(time_emb)
        condition3 = rearrange(condition3, 'b c -> b c 1 1')
        if adjust:
            gamma3, beta3 = self.adjust3(x_adjust)
            up1 = up1 + gamma3 * condition3 + beta3
        else:
            up1 = up1 + condition3
        conv3 = self.conv3(up1)

        up2 = self.up2(conv3, inx)
        condition4 = self.mlp4(time_emb)
        condition4 = rearrange(condition4, 'b c -> b c 1 1')
        if adjust:
            gamma4, beta4 = self.adjust4(x_adjust)
            up2 = up2 + gamma4 * condition4 + beta4
        else:
            up2 = up2 + condition4
        conv4 = self.conv4(up2)

        out = self.outc(conv4)
        return out
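
# End-to-end smoke test for the U-Net (an addition; toy sizes assumed): x and
# x_adjust are dummy 2-channel images, t a batch of diffusion timesteps.
_unet = UNet(in_channels=2, out_channels=1)
_unet_out = _unet(torch.randn(2, 2, 64, 64),
                  torch.randint(0, 1000, (2,)).float(),
                  torch.randn(2, 2, 64, 64), adjust=True)
assert _unet_out.shape == (2, 1, 64, 64)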


class Network(nn.Module):
    def __init__(self, in_channels=3, out_channels=1, context=True):
        super(Network, self).__init__()
        self.unet = UNet(in_channels=in_channels, out_channels=out_channels)
        self.context = context

    def forward(self, x, t, y, x_end, adjust=True):
        if self.context:
            # use the middle channel of the stacked input as the residual base
            x_middle = x[:, 1].unsqueeze(1)
        else:
            x_middle = x

        # the two condition images form the 2-channel input of the EMM branches
        x_adjust = torch.cat((y, x_end), dim=1)
        out = self.unet(x, t, x_adjust, adjust=adjust) + x_middle

        return out
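
# Usage sketch for the full network (an addition; sizes assumed): with
# context=True the middle channel of the stacked input is the residual base,
# while y and x_end form the 2-channel input of the adjust (EMM) branches.
_net = Network(in_channels=3, out_channels=1, context=True)
_pred = _net(torch.randn(2, 3, 64, 64),             # x: stacked input
             torch.randint(0, 1000, (2,)).float(),  # t: timesteps
             torch.randn(2, 1, 64, 64),             # y
             torch.randn(2, 1, 64, 64))             # x_end
assert _pred.shape == (2, 1, 64, 64)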


# WeightNet of the one-shot learning framework
class WeightNet(nn.Module):
    def __init__(self, weight_num=10):
        super(WeightNet, self).__init__()
        init = torch.ones([1, weight_num, 1, 1]) / weight_num
        self.weights = nn.Parameter(init)

    def forward(self, x):
        weights = F.softmax(self.weights, 1)
        out = weights * x
        out = out.sum(dim=1, keepdim=True)

        return out, weights
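
# Usage sketch (an addition): WeightNet fuses weight_num candidate images per
# sample with a single set of learned softmax weights.
_wnet = WeightNet(weight_num=10)
_fused, _w = _wnet(torch.randn(4, 10, 64, 64))
assert _fused.shape == (4, 1, 64, 64) and _w.shape == (1, 10, 1, 1)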