TensorFlow 2.0 Notes - Training on the CIFAR100 Dataset with Convolutional Neural Network Layers (VGG13-like)

These notes cover training a CNN on the CIFAR100 dataset. The code uses a VGG13-like network structure built as two Sequential models (the CNN stack and the fully connected head); instead of a Flatten layer, a reshape operation bridges the CNN output into the fully connected layers. Because the network is fairly deep and has far more parameters than the earlier networks, training was stopped after only 10 epochs (on an RTX 4090); the final accuracy was around 33.8%.

import os
import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics, Input

os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
#tf.random.set_seed(12345)
tf.__version__

#If the download is slow, you can fetch the archive with a download manager (e.g. Thunder/Xunlei)
#pointed directly at the official URL:
#      https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz
#After downloading, put cifar-100-python.tar.gz into the .keras\datasets directory (on my
#machine that is C:\Users\Administrator\.keras\datasets); load_data() will then use the local copy.
# Reference: https://blog.csdn.net/zy_like_study/article/details/104219259
(x_train,y_train), (x_test, y_test) = datasets.cifar100.load_data()
print("Train data shape:", x_train.shape)
print("Train label shape:", y_train.shape)
print("Test data shape:", x_test.shape)
print("Test label shape:", y_test.shape)

def preprocess(x, y):
    #scale pixel values to [0, 1] and cast labels to int32 (needed by one_hot/equal later)
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y

#labels load with shape [N, 1]; squeeze them to [N] so one_hot and equal work on flat vectors
y_train = tf.squeeze(y_train, axis=1)
y_test = tf.squeeze(y_test, axis=1)

batch_size = 128
#build the tf.data pipelines; only the training set is shuffled
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_db = train_db.shuffle(1000).map(preprocess).batch(batch_size)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(batch_size)

sample = next(iter(train_db))
print("Train data sample:", sample[0].shape, sample[1].shape, 
         tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))
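#with batch_size = 128 this should print shapes (128, 32, 32, 3) and (128,),
#and min/max values of 0.0 and 1.0 after the preprocess() normalization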


#Build the CNN: 5 units in total, each unit consisting of two conv layers plus a MaxPooling layer
cnn_layers = [
    #unit 1
    layers.Conv2D(64, kernel_size=[3,3], padding='same', activation='relu'),
    layers.Conv2D(64, kernel_size=[3,3], padding='same', activation='relu'),
    #layers.MaxPool2D(pool_size=[2,2], strides=2, padding='same'),
    layers.MaxPool2D(pool_size=[2,2], strides=2),

    #unit 2
    layers.Conv2D(128, kernel_size=[3,3], padding='same', activation='relu'),
    layers.Conv2D(128, kernel_size=[3,3], padding='same', activation='relu'),
    #layers.MaxPool2D(pool_size=[2,2], strides=2, padding='same'),
    layers.MaxPool2D(pool_size=[2,2], strides=2),

    #unit 3
    layers.Conv2D(256, kernel_size=[3,3], padding='same', activation='relu'),
    layers.Conv2D(256, kernel_size=[3,3], padding='same', activation='relu'),
    #layers.MaxPool2D(pool_size=[2,2], strides=2, padding='same'),
    layers.MaxPool2D(pool_size=[2,2], strides=2),

    #unit 4
    layers.Conv2D(512, kernel_size=[3,3], padding='same', activation='relu'),
    layers.Conv2D(512, kernel_size=[3,3], padding='same', activation='relu'),
    #layers.MaxPool2D(pool_size=[2,2], strides=2, padding='same'),
    layers.MaxPool2D(pool_size=[2,2], strides=2),

    #unit 5
    layers.Conv2D(512, kernel_size=[3,3], padding='same', activation='relu'),
    layers.Conv2D(512, kernel_size=[3,3], padding='same', activation='relu'),
    #layers.MaxPool2D(pool_size=[2,2], strides=2, padding='same'),
    layers.MaxPool2D(pool_size=[2,2], strides=2),
]
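
#shape check for the stack above: each of the 5 MaxPool2D layers halves the spatial size,
#so 32 -> 16 -> 8 -> 4 -> 2 -> 1 while the channel count grows to 512;
#a [b, 32, 32, 3] input therefore leaves the stack as [b, 1, 1, 512]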


def main():
    #[b, 32, 32, 3] => [b, 1, 1, 512]
    cnn_net = Sequential(cnn_layers)
    cnn_net.build(input_shape=[None, 32, 32, 3])
    
    #quick sanity check of the conv stack's output shape
    #x = tf.random.normal([4, 32, 32, 3])
    #out = cnn_net(x)
    #print(out.shape)

    #Build the fully connected head; the output is 100-way classification logits
    fc_net = Sequential([
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(100, activation=None),
    ])
    fc_net.build(input_shape=[None, 512])
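
    #optional sanity check: print layer shapes and parameter counts for both models
    #(the conv stack holds the vast majority of the trainable parameters)
    #cnn_net.summary()
    #fc_net.summary()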

    #set up the optimizer
    optimizer = optimizers.Adam(learning_rate=1e-4)

    #collect the trainable variables of both the CNN and the FC head; + on lists
    #is concatenation, e.g. [1, 2] + [3, 4] => [1, 2, 3, 4]
    variables = cnn_net.trainable_variables + fc_net.trainable_variables
    #training loop
    num_epochs = 10
    for epoch in range(num_epochs):
        for step, (x,y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                #[b, 32, 32, 3] => [b, 1, 1, 512]
                out = cnn_net(x)
                #flatten to [b, 512]; a reshape is used here instead of a Flatten layer
                out = tf.reshape(out, [-1, 512])
                #the fully connected head outputs the 100-class logits
                #[b, 512] => [b, 100]
                logits = fc_net(out)
                #one-hot encode the labels
                y_onehot = tf.one_hot(y, depth=100)
                #compute the loss; from_logits=True applies softmax inside the
                #cross-entropy, which is more numerically stable
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
            #compute gradients
            grads = tape.gradient(loss, variables)
            #apply the gradient update to all variables
            optimizer.apply_gradients(zip(grads, variables))

            if step % 100 == 0:
                print("Epoch[", epoch + 1, "/", num_epochs, "]: step-", step, " loss:", float(loss))
        #evaluate on the test set after each epoch
        total_samples = 0
        total_correct = 0
        for x,y in test_db:
            out = cnn_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_samples += x.shape[0]
            total_correct += int(correct)

        #overall accuracy on the test set
        acc = total_correct / total_samples
        print("Epoch[", epoch + 1, "/", num_epochs, "]: accuracy:", acc)

if __name__ == '__main__':
    main()

Run result: after 10 epochs the test accuracy reached roughly 33.8%.
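
For comparison: these notes deliberately bridge the two Sequential models with tf.reshape instead of a Flatten layer. The same network can also be written as a single Sequential model with Flatten, sketched below. This is only an illustrative alternative, not the code that produced the result above; it assumes a fresh cnn_layers list (reusing the layer instances already built into cnn_net would share their weights), and flat_net is a name introduced here:

flat_net = Sequential(cnn_layers + [
    layers.Flatten(),                    # [b, 1, 1, 512] => [b, 512]
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(100, activation=None),  # 100-class logits
])
flat_net.build(input_shape=[None, 32, 32, 3])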
