神经网络保存-导入

保存

python
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gzip
# fashion_mnist=tf.keras.datasets.fashion_mnist
# (train_images,train_labels),(test_images,test_labels)=fashion_mnist.load_data()
 
#数据在个人资源里面,放到该文件目录中即可
def load_data():
    """Load the Fashion-MNIST dataset from gzipped IDX files.

    The four archives (same names as the official Keras download) must sit
    in the current working directory.

    Returns:
        ((x_train, y_train), (x_test, y_test)) where the image arrays are
        uint8 with shape (N, 28, 28) and the label arrays are uint8 with
        shape (N,).
    """
    # File names in the fixed order: train labels, train images,
    # test labels, test images.  (The original built this list with a
    # redundant copy loop; the literal is used directly.)
    paths = [
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz',
    ]

    # IDX format: label files have an 8-byte header, image files a
    # 16-byte header; everything after it is raw uint8 data.
    with gzip.open(paths[0], 'rb') as lbpath:
        y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[1], 'rb') as imgpath:
        x_train = np.frombuffer(
            imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)

    with gzip.open(paths[2], 'rb') as lbpath:
        y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[3], 'rb') as imgpath:
        x_test = np.frombuffer(
            imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)

    return (x_train, y_train), (x_test, y_test)
# --- Training script: fit the model and checkpoint its weights. ---
(x_train, y_train), (x_test, y_test) = load_data()

# Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1), as Conv2D
# expects a channels-last 4-D input.
x_train = np.expand_dims(x_train, -1)

# One-hot encode labels (10 classes) for categorical cross-entropy.
y_train_one_hot = tf.one_hot(y_train, 10).numpy()
x_train = np.float32(x_train)

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(1, 3, 1),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])

model.build(input_shape=[None, 28, 28, 1])
model.summary()
# BUG FIX: the original passed a *loss* object
# (tf.keras.losses.CategoricalCrossentropy) as the metric, so training
# logs showed the loss twice; report classification accuracy instead.
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.CategoricalCrossentropy(),
    metrics=[tf.keras.metrics.CategoricalAccuracy()],
)

import os  # NOTE(review): unused here; kept in case later cells rely on it.

# Save weights-only checkpoints each epoch so the companion loading
# script can restore them via model.load_weights().
checkpoint_path = "training_1/cp.ckpt"
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path, save_weights_only=True, verbose=1)

history = model.fit(x_train, y_train_one_hot, epochs=10,
                    callbacks=[cp_callback])

# Plot the per-epoch training loss curve.
LOSS = history.history["loss"]
plt.plot(LOSS)
plt.show()

导入

python
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gzip
# fashion_mnist=tf.keras.datasets.fashion_mnist
# (train_images,train_labels),(test_images,test_labels)=fashion_mnist.load_data()
 
#数据在个人资源里面,放到该文件目录中即可
def load_data():
    """Read the Fashion-MNIST gzip archives from the working directory.

    Returns ((x_train, y_train), (x_test, y_test)): uint8 image arrays of
    shape (N, 28, 28) and uint8 label vectors of shape (N,).
    """
    archive_names = [
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz',
    ]

    def _read_idx(name, header_bytes):
        # Decompress and skip the IDX header (8 bytes for label files,
        # 16 bytes for image files); the remainder is raw uint8 data.
        with gzip.open(name, 'rb') as fh:
            return np.frombuffer(fh.read(), np.uint8, offset=header_bytes)

    y_train = _read_idx(archive_names[0], 8)
    x_train = _read_idx(archive_names[1], 16).reshape(len(y_train), 28, 28)
    y_test = _read_idx(archive_names[2], 8)
    x_test = _read_idx(archive_names[3], 16).reshape(len(y_test), 28, 28)

    return (x_train, y_train), (x_test, y_test)
# --- Loading script: rebuild the architecture, restore weights, evaluate. ---
(x_train, y_train), (x_test, y_test) = load_data()

# Add the channel axis Conv2D expects: (N, 28, 28) -> (N, 28, 28, 1).
x_test = np.expand_dims(x_test, -1)

# The architecture must match the training script exactly for the saved
# weights to load.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(1, 3, 1),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.build(input_shape=[None, 28, 28, 1])
model.summary()
# BUG FIX: the original used a loss object as the metric; use accuracy.
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.CategoricalCrossentropy(),
    metrics=[tf.keras.metrics.CategoricalAccuracy()],
)

# Restore the weights-only checkpoint written by the training script.
checkpoint_path = "training_1/cp.ckpt"
model.load_weights(checkpoint_path)

x_test = np.array(x_test, dtype=np.float32)
# PERF FIX: the original called model.predict(x_test) twice (two full
# forward passes over the test set); predict once and reuse the result.
predicted_classes = np.argmax(model.predict(x_test), axis=1)
print(predicted_classes)
print(y_test)
# BUG FIX: the original computed this accuracy as a bare expression and
# discarded it (only visible in a REPL/notebook); print it explicitly.
accuracy = np.sum(y_test == predicted_classes) / y_test.shape[0]
print("test accuracy:", accuracy)
相关推荐
爱的叹息8 分钟前
关于 传感器 的详细解析,涵盖定义、分类、工作原理、常见类型、应用领域、技术挑战及未来趋势,结合实例帮助理解其核心概念
人工智能·机器人
恶霸不委屈9 分钟前
突破精度极限!基于DeepSeek的无人机航拍图像智能校准系统技术解析
人工智能·python·无人机·deepseek
lixy57944 分钟前
深度学习之自动微分
人工智能·python·深度学习
量子位1 小时前
飞猪 AI 意外出圈!邀请码被黄牛倒卖,分分钟搞定机酒预订,堪比专业定制团队
人工智能·llm·aigc
量子位1 小时前
趣丸科技贾朔:AI 音乐迎来应用元年,五年内将重构产业格局|中国 AIGC 产业峰会
人工智能·aigc
量子位1 小时前
粉笔 CTO:大模型打破教育「不可能三角」,因材施教真正成为可能|中国 AIGC 产业峰会
人工智能·aigc
神经星星1 小时前
【TVM教程】microTVM TFLite 指南
人工智能·机器学习·编程语言
Listennnn1 小时前
GPT,Bert类模型对比
人工智能·gpt·自然语言处理·bert
量子位1 小时前
最强视觉生成模型获马斯克连夜关注,吉卜力风格转绘不再需要 GPT 了
人工智能·llm
arbboter2 小时前
【AI插件开发】Notepad++ AI插件开发实践:实现对话窗口功能
人工智能·notepad++·notepad++插件开发·ai对话窗口·异步模型调用·实时输出渲染·动态模型切换