- ResNet classifier training
```python
import os

import torch
import torchvision
from torchvision import transforms
from torch.utils.data import random_split
import torch.nn as nn
import torch.optim as optim
from torchvision.models import resnet50

# Define the preprocessing transform (ImageNet statistics)
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# Load the dataset (one subfolder per class)
data = torchvision.datasets.ImageFolder(root=r"D:\train_model\train_data_set", transform=transform)
classes_set = data.classes

# Save the class names to classes.txt
with open('classes.txt', 'w') as f:
    for class_name in classes_set:
        f.write(class_name + '\n')

# Split the data into train and test sets
train_size = int(0.8 * len(data))
test_size = len(data) - train_size
train_data, test_data = random_split(data, [train_size, test_size])

# Load the train and test data into data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=False)

# Define the model (newer torchvision versions use weights=... instead of pretrained=True)
model = resnet50(pretrained=True)

# Replace the last layer with a classification head for our classes
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, len(classes_set))

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Move the model to the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Define the number of epochs
num_epochs = 10

# Train the model
for epoch in range(num_epochs):
    # Train the model on the training set
    model.train()
    train_loss = 0.0
    for inputs, labels in train_loader:
        # Move the data to the device
        inputs = inputs.to(device)
        labels = labels.to(device)

        # Zero the parameter gradients
        optimizer.zero_grad()

        # Forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Update the training loss
        train_loss += loss.item() * inputs.size(0)

    # Evaluate the model on the test set
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    with torch.no_grad():
        for inputs, labels in test_loader:
            # Move the data to the device
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Forward
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # Update the test loss and accuracy
            test_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            test_acc += torch.sum(preds == labels.data)

    # Print the training and test loss and accuracy
    train_loss /= len(train_data)
    test_loss /= len(test_data)
    test_acc = test_acc.double() / len(test_data)
    print(f"Epoch [{epoch + 1}/{num_epochs}] "
          f"Train Loss: {train_loss:.4f} "
          f"Test Loss: {test_loss:.4f} "
          f"Test Acc: {test_acc:.4f}")

# Save the trained model weights (create the output directory first)
os.makedirs('./model', exist_ok=True)
torch.save(model.state_dict(), './model/trained_model.pth')
```
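For completeness, here is a minimal inference sketch that uses the `classes.txt` and `./model/trained_model.pth` files produced by the training script above. The test image path is a hypothetical placeholder; the rest mirrors the training-time preprocessing so the predictions stay consistent.

```python
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.models import resnet50
from PIL import Image

# Load the class names saved during training
with open('classes.txt') as f:
    classes_set = [line.strip() for line in f if line.strip()]

# Rebuild the same architecture and load the trained weights
model = resnet50(weights=None)  # use pretrained=False on older torchvision versions
model.fc = nn.Linear(model.fc.in_features, len(classes_set))
model.load_state_dict(torch.load('./model/trained_model.pth', map_location='cpu'))
model.eval()

# Same preprocessing as during training
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# Placeholder image path -- replace with a real file
image = Image.open(r"D:\train_model\test.jpg").convert("RGB")
x = transform(image).unsqueeze(0)  # add the batch dimension

with torch.no_grad():
    logits = model(x)
    probs = torch.softmax(logits, dim=1)
    conf, pred = torch.max(probs, dim=1)

print(f"Predicted class: {classes_set[pred.item()]} (confidence {conf.item():.4f})")
```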