An Introductory Example of Generative Adversarial Networks

Introduction

A generative adversarial network (GAN) is a machine learning model for generating new samples. It consists of two main components: a generator and a discriminator. The generator tries to produce new samples that resemble the training data, while the discriminator tries to distinguish the generator's output from real training data.
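
Formally, the generator G and discriminator D play a minimax game; the standard objective, stated here only for reference, is

$$\min_G \max_D \; \mathbb{E}_{x \sim p_{\text{data}}}[\log D(x)] + \mathbb{E}_{z \sim p_z}[\log(1 - D(G(z)))]$$

In the code below this is implemented with binary cross-entropy: the discriminator is trained on real samples labeled 1 and generated samples labeled 0, and the generator is trained (through a frozen discriminator) to get its samples labeled 1.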

Below is a simple introductory GAN example that generates images of handwritten digits.

Implementation

1. Import the required libraries and modules

python
import os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Reshape
from tensorflow.keras.layers import Conv2D, Conv2DTranspose
from tensorflow.keras.optimizers import Adam

2. Load the MNIST dataset

python
(x_train, _), (_, _) = mnist.load_data()
x_train = x_train / 255.0
x_train = np.expand_dims(x_train, axis=3)
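
As a quick sanity check (an optional snippet, not part of the original steps), the preprocessed array should hold 60,000 images of shape 28x28x1 with pixel values scaled to [0, 1]:

python
# Optional check: confirm the shape and value range of the preprocessed data
print(x_train.shape)                  # expected: (60000, 28, 28, 1)
print(x_train.min(), x_train.max())   # expected: 0.0 1.0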

3. Define the generator model

python
generator = Sequential()
generator.add(Dense(7*7*128, input_shape=(100,), activation='relu'))
generator.add(Reshape((7, 7, 128)))
generator.add(Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', activation='relu'))
generator.add(Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same', activation='sigmoid'))
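
The generator maps a 100-dimensional noise vector to a 28x28x1 image: the Dense and Reshape layers produce a 7x7x128 feature map, and the two stride-2 transposed convolutions upsample it to 14x14 and then 28x28. A small optional check (not part of the original tutorial):

python
# Optional check: one noise vector should yield one 28x28x1 image
noise = np.random.normal(0, 1, (1, 100))
fake = generator.predict(noise, verbose=0)
print(fake.shape)   # expected: (1, 28, 28, 1)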

4. Define the discriminator model

python
discriminator = Sequential()
discriminator.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same', input_shape=(28, 28, 1), activation='relu'))
discriminator.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same', activation='relu'))
discriminator.add(Flatten())
discriminator.add(Dense(1, activation='sigmoid'))
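
Conversely, the discriminator downsamples a 28x28x1 image with two stride-2 convolutions and outputs a single probability that the image is real. An optional check (it assumes x_train from step 2):

python
# Optional check: the untrained discriminator returns one probability per image
scores = discriminator.predict(x_train[:4], verbose=0)
print(scores.shape)   # expected: (4, 1), values in (0, 1)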

5. Compile the discriminator model

python
discriminator.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0002, beta_1=0.5), metrics=['accuracy'])

6. Freeze the discriminator's weights

python
# The discriminator was already compiled above, so calling its own train_on_batch
# still updates its weights; setting trainable=False here only freezes it inside
# the combined GAN model, which is compiled after this flag is set.
discriminator.trainable = False

7. Define the GAN model

python
gan = Sequential()
gan.add(generator)
gan.add(discriminator)

8. Compile the GAN model

python
gan.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0002, beta_1=0.5))
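
In the combined model, noise flows through the generator and then through the frozen discriminator, so the output is a single probability per noise vector. An optional check (not in the original steps):

python
# Optional check: the combined model maps 100-dim noise to one probability
out = gan.predict(np.random.normal(0, 1, (1, 100)), verbose=0)
print(out.shape)   # expected: (1, 1)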

9. Define the training function

python
def train_gan(epochs, batch_size, sample_interval):
    for epoch in range(epochs):
        # Sample random noise as generator input
        noise = np.random.normal(0, 1, (batch_size, 100))

        # Generate a batch of fake images
        generated_images = generator.predict(noise, verbose=0)

        # Randomly select a batch of real images
        real_images = x_train[np.random.randint(0, x_train.shape[0], batch_size)]

        # Train the discriminator: real images are labeled 1, fake images 0.
        # train_on_batch returns [loss, accuracy] because the discriminator
        # was compiled with metrics=['accuracy'].
        discriminator_loss_real = discriminator.train_on_batch(real_images, np.ones((batch_size, 1)))
        discriminator_loss_fake = discriminator.train_on_batch(generated_images, np.zeros((batch_size, 1)))
        discriminator_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_fake)

        # Train the generator through the frozen discriminator, asking it to
        # label the generated images as real (1)
        noise = np.random.normal(0, 1, (batch_size, 100))
        generator_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)))

        # Print the losses
        if epoch % sample_interval == 0:
            print(f"Epoch {epoch}/{epochs}, Discriminator Loss: {discriminator_loss[0]}, Generator Loss: {generator_loss}")

            # Save generated images
            save_images(epoch)

10. Save the generated images

python
def save_images(epoch):
    rows, cols = 5, 5
    noise = np.random.normal(0, 1, (rows * cols, 100))
    generated_images = generator.predict(noise, verbose=0)
    # The generator's sigmoid output is already in [0, 1], matching the
    # normalized training data, so no rescaling is needed before plotting
    fig, axs = plt.subplots(rows, cols)
    idx = 0
    for i in range(rows):
        for j in range(cols):
            axs[i, j].imshow(generated_images[idx, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
            idx += 1
    os.makedirs("gan_images", exist_ok=True)
    fig.savefig(f"gan_images/mnist_{epoch}.png")
    plt.close()
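
Before the full run, an optional smoke test with small, hypothetical settings (not from the original tutorial) confirms that the training loop, image saving, and output directory all work end to end:

python
# Optional smoke test with tiny, hypothetical settings
train_gan(epochs=3, batch_size=32, sample_interval=1)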

11. Train the GAN model

python
epochs = 10000
batch_size = 128
sample_interval = 1000

train_gan(epochs, batch_size, sample_interval)

Complete code

python
import os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Reshape
from tensorflow.keras.layers import Conv2D, Conv2DTranspose
from tensorflow.keras.optimizers import Adam

# Load the MNIST dataset and scale pixel values to [0, 1]
(x_train, _), (_, _) = mnist.load_data()
x_train = x_train / 255.0
x_train = np.expand_dims(x_train, axis=3)

# Define the generator model
generator = Sequential()
generator.add(Dense(7*7*128, input_shape=(100,), activation='relu'))
generator.add(Reshape((7, 7, 128)))
generator.add(Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', activation='relu'))
generator.add(Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same', activation='sigmoid'))

# Define the discriminator model
discriminator = Sequential()
discriminator.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same', input_shape=(28, 28, 1), activation='relu'))
discriminator.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same', activation='relu'))
discriminator.add(Flatten())
discriminator.add(Dense(1, activation='sigmoid'))

# Compile the discriminator model
discriminator.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0002, beta_1=0.5), metrics=['accuracy'])

# Freeze the discriminator's weights inside the combined GAN model
# (it was compiled above, so it still updates when trained directly)
discriminator.trainable = False

# Define the GAN model
gan = Sequential()
gan.add(generator)
gan.add(discriminator)

# Compile the GAN model
gan.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0002, beta_1=0.5))

# Define the training function
def train_gan(epochs, batch_size, sample_interval):
    for epoch in range(epochs):
        # Sample random noise as generator input
        noise = np.random.normal(0, 1, (batch_size, 100))

        # Generate a batch of fake images
        generated_images = generator.predict(noise, verbose=0)

        # Randomly select a batch of real images
        real_images = x_train[np.random.randint(0, x_train.shape[0], batch_size)]

        # Train the discriminator: real images labeled 1, fake images labeled 0
        discriminator_loss_real = discriminator.train_on_batch(real_images, np.ones((batch_size, 1)))
        discriminator_loss_fake = discriminator.train_on_batch(generated_images, np.zeros((batch_size, 1)))
        discriminator_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_fake)

        # Train the generator through the frozen discriminator with "real" labels
        noise = np.random.normal(0, 1, (batch_size, 100))
        generator_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)))

        # Print the losses
        if epoch % sample_interval == 0:
            print(f"Epoch {epoch}/{epochs}, Discriminator Loss: {discriminator_loss[0]}, Generator Loss: {generator_loss}")

            # Save generated images
            save_images(epoch)

# Save a grid of generated images
def save_images(epoch):
    rows, cols = 5, 5
    noise = np.random.normal(0, 1, (rows * cols, 100))
    generated_images = generator.predict(noise, verbose=0)
    # The sigmoid output is already in [0, 1], so no rescaling is needed
    fig, axs = plt.subplots(rows, cols)
    idx = 0
    for i in range(rows):
        for j in range(cols):
            axs[i, j].imshow(generated_images[idx, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
            idx += 1
    os.makedirs("gan_images", exist_ok=True)
    fig.savefig(f"gan_images/mnist_{epoch}.png")
    plt.close()

# Train the GAN model
epochs = 10000
batch_size = 128
sample_interval = 1000

train_gan(epochs, batch_size, sample_interval)

This example uses the MNIST dataset to generate images of handwritten digits. Both the generator and the discriminator use convolutional network architectures. During training, the generator tries to produce realistic handwritten digits, while the discriminator tries to tell real images from generated ones. By repeatedly training the generator and discriminator against each other, the GAN gradually produces more realistic digit images. The generated images are saved in the gan_images folder.
