深度学习笔记21_优化器对比实验

一、我的环境

1.语言环境:Python 3.9

2.编译器:Pycharm

3.深度学习环境:TensorFlow 2.10.0

二、GPU设置

若使用的是cpu则可忽略

复制代码
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")

if gpus:
    # Restrict TensorFlow to the first GPU only, and let its memory
    # grow on demand instead of pre-allocating the whole card.
    primary_gpu = gpus[0]
    tf.config.experimental.set_memory_growth(primary_gpu, True)
    tf.config.set_visible_devices([primary_gpu], "GPU")

from tensorflow          import keras
import matplotlib.pyplot as plt
import pandas            as pd
import numpy             as np
import warnings,os,PIL,pathlib

warnings.filterwarnings("ignore")             # suppress warning output
plt.rcParams['font.sans-serif']    = ['SimHei']  # use the SimHei font so CJK labels render correctly
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly alongside CJK fonts

三、导入数据

python 复制代码
# Locate the dataset directory and count every image below it
# (one sub-directory per class, hence the '*/*' pattern).
data_dir = pathlib.Path("./data")
image_count = len(list(data_dir.glob('*/*')))
print("图片总数为:", image_count)
python 复制代码
# Input-pipeline hyperparameters: batch size and target image size.
batch_size, img_height, img_width = 16, 336, 336
python 复制代码
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
python 复制代码
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
python 复制代码
# Class labels are inferred from the sub-directory names (alphabetical order).
class_names = train_ds.class_names
print(class_names)

运行结果:

python 复制代码
['Angelina Jolie', 'Brad Pitt', 'Denzel Washington', 'Hugh Jackman', 'Jennifer Lawrence', 'Johnny Depp', 'Kate Winslet', 'Leonardo DiCaprio', 'Megan Fox', 'Natalie Portman', 'Nicole Kidman', 'Robert Downey Jr', 'Sandra Bullock', 'Scarlett Johansson', 'Tom Cruise', 'Tom Hanks', 'Will Smith']

四、检查数据

python 复制代码
# Pull a single batch to sanity-check the tensor shapes.
image_batch, labels_batch = next(iter(train_ds))
print(image_batch.shape)   # (16, 336, 336, 3)
print(labels_batch.shape)  # (16,)

五、配置数据集

python 复制代码
AUTOTUNE = tf.data.AUTOTUNE

def _scale_pixels(image, label):
    """Normalize pixel values from [0, 255] to [0, 1]; labels pass through."""
    return image / 255.0, label

# Cache decoded images, shuffle, normalize, and prefetch so the GPU never
# waits on the input pipeline.  Batching already happened in
# image_dataset_from_directory, so no .batch() here.
train_ds = (
    train_ds.cache()
    .shuffle(1000)
    .map(_scale_pixels)
    .prefetch(buffer_size=AUTOTUNE)
)

val_ds = (
    val_ds.cache()
    .shuffle(1000)
    .map(_scale_pixels)
    .prefetch(buffer_size=AUTOTUNE)
)

六、数据可视化

python 复制代码
plt.figure(figsize=(10, 8))  # figure is 10 wide by 8 tall
plt.suptitle("数据展示")

# Show the first 15 images of one training batch with their class names.
for images, labels in train_ds.take(1):
    for i in range(15):
        plt.subplot(4, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)

        # Display the image (already normalized to [0, 1] by the pipeline).
        plt.imshow(images[i])
        # Labels produced by image_dataset_from_directory are already 0-based
        # indices into class_names.  The original code used labels[i]-1, which
        # shifted every caption to the previous class and mapped label 0 to
        # the LAST class.
        plt.xlabel(class_names[labels[i]])

plt.show()

运行结果:

七、构建模型

python 复制代码
from tensorflow.keras.layers import Dropout,Dense,BatchNormalization
from tensorflow.keras.models import Model

def create_model(optimizer='adam'):
    """Build a transfer-learning classifier on a frozen VGG16 base.

    Args:
        optimizer: optimizer instance or name, forwarded to ``compile``.

    Returns:
        A compiled ``tf.keras.Model`` ready for ``fit``.
    """
    # ImageNet-pretrained convolutional base; pooling='avg' appends a
    # GlobalAveragePooling2D, so the base outputs a flat 512-vector.
    # NOTE: Keras expects input_shape as (height, width, channels); the
    # original passed (img_width, img_height, 3), which only worked because
    # both dimensions are 336.
    vgg16_base_model = tf.keras.applications.vgg16.VGG16(
        weights='imagenet',
        include_top=False,
        input_shape=(img_height, img_width, 3),
        pooling='avg')

    # Freeze the convolutional base so only the new head is trained.
    for layer in vgg16_base_model.layers:
        layer.trainable = False

    # Classification head: dense -> batch-norm -> dropout -> softmax.
    X = vgg16_base_model.output
    X = Dense(170, activation='relu')(X)
    X = BatchNormalization()(X)
    X = Dropout(0.5)(X)

    output = Dense(len(class_names), activation='softmax')(X)
    vgg16_model = Model(inputs=vgg16_base_model.input, outputs=output)

    # Integer (non-one-hot) labels -> sparse categorical cross-entropy.
    vgg16_model.compile(optimizer=optimizer,
                        loss='sparse_categorical_crossentropy',
                        metrics=['accuracy'])
    return vgg16_model

# Build one identical model per optimizer under comparison.
model1 = create_model(optimizer=tf.keras.optimizers.Adam())
model2 = create_model(optimizer=tf.keras.optimizers.SGD())
model2.summary()

运行结果:

python 复制代码
Model: "model_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 input_2 (InputLayer)        [(None, 336, 336, 3)]     0

 block1_conv1 (Conv2D)       (None, 336, 336, 64)      1792

 block1_conv2 (Conv2D)       (None, 336, 336, 64)      36928

 block1_pool (MaxPooling2D)  (None, 168, 168, 64)      0

 block2_conv1 (Conv2D)       (None, 168, 168, 128)     73856

 block2_conv2 (Conv2D)       (None, 168, 168, 128)     147584

 block2_pool (MaxPooling2D)  (None, 84, 84, 128)       0

 block3_conv1 (Conv2D)       (None, 84, 84, 256)       295168

 block3_conv2 (Conv2D)       (None, 84, 84, 256)       590080

 block3_conv3 (Conv2D)       (None, 84, 84, 256)       590080

 block3_pool (MaxPooling2D)  (None, 42, 42, 256)       0

 block4_conv1 (Conv2D)       (None, 42, 42, 512)       1180160

 block4_conv2 (Conv2D)       (None, 42, 42, 512)       2359808

 block4_conv3 (Conv2D)       (None, 42, 42, 512)       2359808

 block4_pool (MaxPooling2D)  (None, 21, 21, 512)       0

 block5_conv1 (Conv2D)       (None, 21, 21, 512)       2359808

 block5_conv2 (Conv2D)       (None, 21, 21, 512)       2359808

 block5_conv3 (Conv2D)       (None, 21, 21, 512)       2359808

 block5_pool (MaxPooling2D)  (None, 10, 10, 512)       0

 global_average_pooling2d_1   (None, 512)              0
 (GlobalAveragePooling2D)

 dense_2 (Dense)             (None, 170)               87210

 batch_normalization_1 (Batc  (None, 170)              680
 hNormalization)

 dropout_1 (Dropout)         (None, 170)               0

 dense_3 (Dense)             (None, 17)                2907

=================================================================
Total params: 14,805,485
Trainable params: 90,457
Non-trainable params: 14,715,028
_________________________________________________________________

八、训练模型

python 复制代码
# Train both models for the same number of epochs so the optimizer
# comparison is fair.  The original snippet was a broken paste: the line
# "...val_ds)NO_EPOCHS = 50" fused two statements (a syntax error) and
# duplicated both fit() calls, which would have trained each model twice.
NO_EPOCHS = 50

history_model1 = model1.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)
history_model2 = model2.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)

运行结果:

python 复制代码
Epoch 1/50
90/90 [==============================] - 14s 140ms/step - loss: 3.1176 - accuracy: 0.1035 - val_loss: 2.7743 - val_accuracy: 0.1972
Epoch 2/50
90/90 [==============================] - 12s 137ms/step - loss: 2.5185 - accuracy: 0.2181 - val_loss: 2.6079 - val_accuracy: 0.2056
Epoch 3/50
90/90 [==============================] - 12s 137ms/step - loss: 2.2428 - accuracy: 0.2757 - val_loss: 2.4016 - val_accuracy: 0.3000
Epoch 4/50
90/90 [==============================] - 12s 137ms/step - loss: 2.0429 - accuracy: 0.3458 - val_loss: 2.2346 - val_accuracy: 0.3083
Epoch 5/50
90/90 [==============================] - 12s 137ms/step - loss: 1.9355 - accuracy: 0.3917 - val_loss: 2.0221 - val_accuracy: 0.3778
Epoch 6/50
90/90 [==============================] - 12s 137ms/step - loss: 1.8070 - accuracy: 0.3965 - val_loss: 1.8872 - val_accuracy: 0.4250
Epoch 7/50
90/90 [==============================] - 12s 137ms/step - loss: 1.7000 - accuracy: 0.4500 - val_loss: 1.8404 - val_accuracy: 0.4167
Epoch 8/50
90/90 [==============================] - 12s 137ms/step - loss: 1.6306 - accuracy: 0.4757 - val_loss: 1.8395 - val_accuracy: 0.3972
Epoch 9/50
90/90 [==============================] - 12s 137ms/step - loss: 1.5473 - accuracy: 0.4979 - val_loss: 1.6730 - val_accuracy: 0.4806
Epoch 10/50
90/90 [==============================] - 12s 137ms/step - loss: 1.4987 - accuracy: 0.5299 - val_loss: 1.6955 - val_accuracy: 0.4500
Epoch 11/50
90/90 [==============================] - 12s 138ms/step - loss: 1.4504 - accuracy: 0.5507 - val_loss: 1.6891 - val_accuracy: 0.4806
Epoch 12/50
90/90 [==============================] - 12s 137ms/step - loss: 1.3725 - accuracy: 0.5722 - val_loss: 1.7054 - val_accuracy: 0.4500
Epoch 13/50
90/90 [==============================] - 12s 137ms/step - loss: 1.3657 - accuracy: 0.5528 - val_loss: 1.5714 - val_accuracy: 0.5028
Epoch 14/50
90/90 [==============================] - 12s 137ms/step - loss: 1.3056 - accuracy: 0.5972 - val_loss: 1.5478 - val_accuracy: 0.5139
Epoch 15/50
90/90 [==============================] - 12s 138ms/step - loss: 1.2539 - accuracy: 0.6028 - val_loss: 1.5626 - val_accuracy: 0.5000
Epoch 16/50
90/90 [==============================] - 12s 138ms/step - loss: 1.2368 - accuracy: 0.6153 - val_loss: 1.5329 - val_accuracy: 0.5028
Epoch 17/50
90/90 [==============================] - 12s 136ms/step - loss: 1.1918 - accuracy: 0.6132 - val_loss: 1.5597 - val_accuracy: 0.5000
Epoch 18/50
90/90 [==============================] - 12s 137ms/step - loss: 1.1848 - accuracy: 0.6208 - val_loss: 1.4749 - val_accuracy: 0.4972
Epoch 19/50
90/90 [==============================] - 12s 135ms/step - loss: 1.1555 - accuracy: 0.6465 - val_loss: 1.7383 - val_accuracy: 0.4167
Epoch 20/50
90/90 [==============================] - 12s 137ms/step - loss: 1.1110 - accuracy: 0.6507 - val_loss: 1.6035 - val_accuracy: 0.5000
Epoch 21/50
90/90 [==============================] - 12s 137ms/step - loss: 1.0790 - accuracy: 0.6743 - val_loss: 1.4571 - val_accuracy: 0.5333
Epoch 22/50
90/90 [==============================] - 12s 132ms/step - loss: 1.0522 - accuracy: 0.6687 - val_loss: 1.3678 - val_accuracy: 0.5750
Epoch 23/50
90/90 [==============================] - 12s 136ms/step - loss: 1.0175 - accuracy: 0.6861 - val_loss: 1.4401 - val_accuracy: 0.5056
Epoch 24/50
90/90 [==============================] - 12s 138ms/step - loss: 0.9935 - accuracy: 0.6944 - val_loss: 1.6090 - val_accuracy: 0.5056
Epoch 25/50
90/90 [==============================] - 12s 138ms/step - loss: 0.9755 - accuracy: 0.6944 - val_loss: 1.5200 - val_accuracy: 0.5000
Epoch 26/50
90/90 [==============================] - 12s 137ms/step - loss: 0.9349 - accuracy: 0.7243 - val_loss: 1.5443 - val_accuracy: 0.5000
Epoch 27/50
90/90 [==============================] - 12s 137ms/step - loss: 0.9127 - accuracy: 0.7139 - val_loss: 1.3795 - val_accuracy: 0.5639
Epoch 28/50
90/90 [==============================] - 12s 137ms/step - loss: 0.8983 - accuracy: 0.7139 - val_loss: 1.3948 - val_accuracy: 0.5778
Epoch 29/50
90/90 [==============================] - 12s 138ms/step - loss: 0.8580 - accuracy: 0.7271 - val_loss: 1.4376 - val_accuracy: 0.5389
Epoch 30/50
90/90 [==============================] - 12s 138ms/step - loss: 0.8537 - accuracy: 0.7312 - val_loss: 1.3611 - val_accuracy: 0.5833
Epoch 31/50
90/90 [==============================] - 12s 138ms/step - loss: 0.8291 - accuracy: 0.7306 - val_loss: 1.4406 - val_accuracy: 0.5417
Epoch 32/50
90/90 [==============================] - 12s 138ms/step - loss: 0.8300 - accuracy: 0.7340 - val_loss: 1.3366 - val_accuracy: 0.5833
Epoch 33/50
90/90 [==============================] - 13s 139ms/step - loss: 0.7823 - accuracy: 0.7736 - val_loss: 1.3799 - val_accuracy: 0.5556
Epoch 34/50
90/90 [==============================] - 12s 139ms/step - loss: 0.7909 - accuracy: 0.7646 - val_loss: 1.3132 - val_accuracy: 0.5917
Epoch 35/50
90/90 [==============================] - 12s 139ms/step - loss: 0.7966 - accuracy: 0.7583 - val_loss: 1.4601 - val_accuracy: 0.5361
Epoch 36/50
90/90 [==============================] - 12s 138ms/step - loss: 0.7554 - accuracy: 0.7514 - val_loss: 1.3837 - val_accuracy: 0.5528
Epoch 37/50
90/90 [==============================] - 12s 138ms/step - loss: 0.7400 - accuracy: 0.7715 - val_loss: 1.4580 - val_accuracy: 0.5667
Epoch 38/50
90/90 [==============================] - 12s 138ms/step - loss: 0.6991 - accuracy: 0.7889 - val_loss: 1.5139 - val_accuracy: 0.5083
Epoch 39/50
90/90 [==============================] - 12s 132ms/step - loss: 0.6953 - accuracy: 0.7875 - val_loss: 1.5710 - val_accuracy: 0.5083
Epoch 40/50
90/90 [==============================] - 12s 138ms/step - loss: 0.6927 - accuracy: 0.7868 - val_loss: 1.4386 - val_accuracy: 0.5194
Epoch 41/50
90/90 [==============================] - 12s 139ms/step - loss: 0.6569 - accuracy: 0.7910 - val_loss: 1.3502 - val_accuracy: 0.5583
Epoch 42/50
90/90 [==============================] - 13s 139ms/step - loss: 0.6406 - accuracy: 0.8076 - val_loss: 1.4855 - val_accuracy: 0.5639
Epoch 43/50
90/90 [==============================] - 12s 137ms/step - loss: 0.6585 - accuracy: 0.7979 - val_loss: 1.6144 - val_accuracy: 0.5194
Epoch 44/50
90/90 [==============================] - 12s 137ms/step - loss: 0.6393 - accuracy: 0.7944 - val_loss: 1.4049 - val_accuracy: 0.6056
Epoch 45/50
90/90 [==============================] - 12s 137ms/step - loss: 0.6111 - accuracy: 0.8146 - val_loss: 1.4132 - val_accuracy: 0.5611
Epoch 46/50
90/90 [==============================] - 12s 137ms/step - loss: 0.6233 - accuracy: 0.7993 - val_loss: 1.5067 - val_accuracy: 0.5417
Epoch 47/50
90/90 [==============================] - 12s 137ms/step - loss: 0.5802 - accuracy: 0.8208 - val_loss: 1.5511 - val_accuracy: 0.5250
Epoch 48/50
90/90 [==============================] - 12s 137ms/step - loss: 0.5785 - accuracy: 0.8104 - val_loss: 1.3737 - val_accuracy: 0.5583
Epoch 49/50
90/90 [==============================] - 12s 137ms/step - loss: 0.5717 - accuracy: 0.8333 - val_loss: 1.3962 - val_accuracy: 0.5833
Epoch 50/50
90/90 [==============================] - 12s 137ms/step - loss: 0.5532 - accuracy: 0.8271 - val_loss: 1.5394 - val_accuracy: 0.5417

九、评估模型

python 复制代码
from matplotlib.ticker import MultipleLocator

plt.rcParams['savefig.dpi'] = 300  # saved-figure resolution
plt.rcParams['figure.dpi']  = 300  # on-screen resolution

# Keras History objects expose their curves as plain dicts.
hist1 = history_model1.history
hist2 = history_model2.history

epochs_range = range(len(hist1['accuracy']))

plt.figure(figsize=(16, 4))

# Left panel: accuracy curves for both optimizers.
plt.subplot(1, 2, 1)
plt.plot(epochs_range, hist1['accuracy'], label='Training Accuracy-Adam')
plt.plot(epochs_range, hist2['accuracy'], label='Training Accuracy-SGD')
plt.plot(epochs_range, hist1['val_accuracy'], label='Validation Accuracy-Adam')
plt.plot(epochs_range, hist2['val_accuracy'], label='Validation Accuracy-SGD')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
# One x-axis tick per epoch.
plt.gca().xaxis.set_major_locator(MultipleLocator(1))

# Right panel: loss curves for both optimizers.
plt.subplot(1, 2, 2)
plt.plot(epochs_range, hist1['loss'], label='Training Loss-Adam')
plt.plot(epochs_range, hist2['loss'], label='Training Loss-SGD')
plt.plot(epochs_range, hist1['val_loss'], label='Validation Loss-Adam')
plt.plot(epochs_range, hist2['val_loss'], label='Validation Loss-SGD')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
# One x-axis tick per epoch.
plt.gca().xaxis.set_major_locator(MultipleLocator(1))

plt.show()

运行结果:

python 复制代码
def test_accuracy_report(model):
    """Evaluate `model` on the validation set and print its loss and accuracy.

    Relies on the module-level `val_ds` dataset.
    """
    loss, accuracy = model.evaluate(val_ds, verbose=0)
    # The original message said "Loss function: <value>", but the printed
    # number is the loss *value*, not the loss function.
    print('Loss: %s, accuracy:' % loss, accuracy)

test_accuracy_report(model2)

运行结果:

python 复制代码
Loss function: 1.5393909215927124, accuracy: 0.5416666865348816

十、总结

本周学习使用VGG16模型进行人脸识别,通过改变优化器得到不一样的效果;一般来说,Adam 是最好的选择,可以通过实验改变优化器来寻找最佳。

相关推荐
moonsims10 分钟前
SKYTRAC-无人机、无人机系统和城市空中交通卫星通信 – BVLOS 和 C2 卫星通信终端和任务服务器
人工智能
云卓SKYDROID12 分钟前
无人机电压模块技术剖析
人工智能·无人机·电压·高科技·云卓科技
Codebee18 分钟前
使用Qoder 改造前端UI/UE升级改造实践:从传统界面到现代化体验的华丽蜕变
前端·人工智能
用户51914958484523 分钟前
Apache服务器自动化运维与安全加固脚本详解
人工智能·aigc
yintele28 分钟前
智能AI汽车电子行业,EMS应用相关问题
人工智能·汽车
却道天凉_好个秋36 分钟前
深度学习(四):数据集划分
人工智能·深度学习·数据集
数字冰雹40 分钟前
“图观”端渲染场景编辑器
人工智能·编辑器
里昆40 分钟前
【AI】Tensorflow在jupyterlab中运行要注意的问题
人工智能·python·tensorflow
清木!41 分钟前
数据仓库详解
笔记
荼蘼1 小时前
OpenCV 高阶 图像金字塔 用法解析及案例实现
人工智能·opencv·计算机视觉