- resnet分类器训练
"""Fine-tune a pretrained ResNet-50 image classifier on a local ImageFolder dataset.

Pipeline: load images from a folder-per-class directory, save the class names,
split 80/20 into train/test, replace the ResNet-50 head with a fresh linear
layer sized to the number of classes, train with SGD + cross-entropy for a
fixed number of epochs, and save the trained weights.
"""

import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import random_split
from torchvision import transforms
from torchvision.models import resnet50

# Standard ImageNet preprocessing: resize, center-crop to 224x224, and
# normalize with the ImageNet channel statistics expected by the pretrained weights.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Load the dataset; each subdirectory of the root is treated as one class.
data = torchvision.datasets.ImageFolder(root=r"D:\train_model\train_data_set",
                                        transform=transform)
classes_set = data.classes

# Persist the class-index-to-name mapping so inference code can decode predictions.
# NOTE: explicit utf-8 — class folder names may be non-ASCII (e.g. Chinese),
# and the Windows locale default codec would corrupt them.
with open('classes.txt', 'w', encoding='utf-8') as f:
    for class_name in classes_set:
        f.write(class_name + '\n')

# Random 80/20 train/test split.
train_size = int(0.8 * len(data))
test_size = len(data) - train_size
train_data, test_data = random_split(data, [train_size, test_size])

# Mini-batch loaders: shuffle only the training set.
train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=False)

# Start from ImageNet-pretrained weights.
# NOTE(review): `pretrained=True` is deprecated in recent torchvision in favor
# of `weights=ResNet50_Weights.DEFAULT`; kept for compatibility with the
# torchvision version this script was written against.
model = resnet50(pretrained=True)

# Replace the classification head with a linear layer sized to our classes.
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, len(classes_set))

# Loss and optimizer. All parameters are fine-tuned (no layer freezing).
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Use the GPU when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

num_epochs = 10

for epoch in range(num_epochs):
    # --- Training pass over the train split ---
    model.train()
    train_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()

        # Forward + backward + optimize.
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Accumulate the sum of per-sample losses (loss.item() is the batch mean).
        train_loss += loss.item() * inputs.size(0)

    # --- Evaluation pass over the test split ---
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(test_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            test_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            test_acc += torch.sum(preds == labels.data)

    # Convert accumulated sums into per-sample averages before reporting.
    train_loss /= len(train_data)
    test_loss /= len(test_data)
    test_acc = test_acc.double() / len(test_data)
    print(f"Epoch [{epoch + 1}/{num_epochs}] Train Loss: {train_loss:.4f} Test Loss: {test_loss:.4f} Test Acc: {test_acc:.4f}")

# Save the trained weights. Create the target directory first: torch.save does
# not create missing directories and would raise FileNotFoundError otherwise.
os.makedirs('./model', exist_ok=True)
torch.save(model.state_dict(), './model/trained_model.pth')
resnet分类训练
E.K.江湖念书人2025-01-03 14:57
相关推荐
九章云极AladdinEdu6 小时前
超参数自动化调优指南:Optuna vs. Ray Tune 对比评测研梦非凡8 小时前
ICCV 2025|从粗到细:用于高效3D高斯溅射的可学习离散小波变换通街市密人有11 小时前
IDF: Iterative Dynamic Filtering Networks for Generalizable Image Denoising智数研析社11 小时前
9120 部 TMDb 高分电影数据集 | 7 列全维度指标 (评分 / 热度 / 剧情)+API 权威源 | 电影趋势分析 / 推荐系统 / NLP 建模用七元权12 小时前
论文阅读-Correlate and ExciteViperL113 小时前
[智能算法]可微的神经网络搜索算法-FBNet2202_7567496914 小时前
LLM大模型-大模型微调(常见微调方法、LoRA原理与实战、LLaMA-Factory工具部署与训练、模型量化QLoRA)人有一心14 小时前
深度学习中显性特征组合的网络结构crossNet猫天意14 小时前
【目标检测】metrice_curve和loss_curve对比图可视化HenrySmale16 小时前
05 回归问题和分类问题