FGSM (Fast Gradient Sign Method) non-targeted attack code (PyTorch)

Dataset: MNIST handwritten digit recognition

Model: LeNet
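
FGSM is a one-step, non-targeted attack: it perturbs the clean input x in the direction of the sign of the gradient of the loss with respect to that input, x_adv = x + ε · sign(∇x J(θ, x, y)), and then clamps the result back to the valid pixel range [0, 1]. The fgsm_attack function in the code below is a direct implementation of this update.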

import torch.nn as nn
import torch.nn.functional as F
import torch
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
use_cuda = True
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")


# LeNet model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)  # dropout against overfitting; pass training=self.training so it is disabled in eval mode
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


# Load the MNIST test set; batch_size=1 so each sample is attacked individually
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, download=True,
                   transform=transforms.Compose([transforms.ToTensor()])),
    batch_size=1, shuffle=True)


model = Net().to(device)
pretrained_model = "lenet_mnist_model.pth"  # pretrained LeNet weights (see the training sketch at the end of this post)
model.load_state_dict(torch.load(pretrained_model, map_location=device))
model.eval()


def fgsm_attack(image, epsilon, data_grad):  # FGSM attack: takes the clean image, the perturbation budget epsilon, and the gradient of the loss w.r.t. the input
    sign_data_grad = data_grad.sign()  # element-wise sign of the gradient
    perturbed_image = image + epsilon * sign_data_grad  # FGSM update: x_adv = x + epsilon * sign(grad)
    perturbed_image = torch.clamp(perturbed_image, 0, 1)  # clip back to the valid pixel range [0, 1]
    return perturbed_image


epsilons = [0, .05, .1, .15, .2, .25, .3]


def test(model, device, test_loader, epsilon):
    correct = 0
    adv_examples = []
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        data.requires_grad = True  # track gradients w.r.t. the input image
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]  # index of the max log-probability = prediction on the clean input
        loss = F.nll_loss(output, target)
        model.zero_grad()  # clear any stale gradients
        loss.backward()  # backpropagate to get the gradient of the loss w.r.t. the input
        data_grad = data.grad.data
        perturbed_data = fgsm_attack(data, epsilon, data_grad)  # craft the adversarial example
        output = model(perturbed_data)
        final_pred = output.max(1, keepdim=True)[1]
        if final_pred.item() == target.item():  # still classified correctly after the attack
            correct += 1
        if len(adv_examples) < 6:
            adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
            adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))

    final_acc = correct / float(len(test_loader))  # accuracy under attack (batch_size=1, so len(test_loader) equals the number of test samples)
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
    return final_acc, adv_examples


accuracies = []
examples = []

# Run test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)

plt.plot(epsilons, accuracies)  # test accuracy as a function of the perturbation budget epsilon
plt.show()

# Show a few adversarial examples at each epsilon (title: clean prediction -> prediction after the attack)
cnt = 0
plt.figure(figsize=(8, 10))
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons), len(examples[0]), cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig, adv, ex = examples[i][j]
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
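
The listing loads a pretrained checkpoint named lenet_mnist_model.pth but does not show how that file was produced. If you do not already have one, a minimal training sketch along the following lines can generate it. It reuses the Net class and the device object defined above; the optimizer, learning rate, batch size, and epoch count are illustrative assumptions rather than values taken from the original code.

import torch
import torch.nn.functional as F
from torch import optim
from torchvision import datasets, transforms

# Reuses the Net (LeNet) class and `device` from the listing above.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=64, shuffle=True)

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)  # assumed hyperparameters

model.train()
for epoch in range(5):  # a handful of epochs is enough for MNIST
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)  # NLL loss matches the log_softmax output of Net
        loss.backward()
        optimizer.step()
    print("epoch {}: last batch loss {:.4f}".format(epoch, loss.item()))

torch.save(model.state_dict(), "lenet_mnist_model.pth")  # checkpoint consumed by the attack code above

Once the checkpoint is saved, the attack script above can be run unchanged.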