- Training a ResNet classifier
```python
import os
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torchvision.models import resnet50
from torch.utils.data import random_split

# Define the transformation (ImageNet preprocessing)
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# Load the dataset (one sub-folder per class)
data = torchvision.datasets.ImageFolder(root=r"D:\train_model\train_data_set", transform=transform)
classes_set = data.classes

# Save the class names to classes.txt
with open('classes.txt', 'w') as f:
    for class_name in classes_set:
        f.write(class_name + '\n')

# Split the data into train and test sets
train_size = int(0.8 * len(data))
test_size = len(data) - train_size
train_data, test_data = random_split(data, [train_size, test_size])

# Load the train and test data into data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=False)

# Define the model
# Note: `pretrained=True` is deprecated in newer torchvision releases;
# there you would pass `weights=ResNet50_Weights.IMAGENET1K_V1` instead.
model = resnet50(pretrained=True)

# Replace the last layer to match the number of classes
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, len(classes_set))

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Move the model to the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Define the number of epochs
num_epochs = 10

# Train the model
for epoch in range(num_epochs):
    # Train the model on the training set
    model.train()
    train_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        # Move the data to the device
        inputs = inputs.to(device)
        labels = labels.to(device)

        # Zero the parameter gradients
        optimizer.zero_grad()

        # Forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Accumulate the training loss
        train_loss += loss.item() * inputs.size(0)

    # Evaluate the model on the test set
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(test_loader):
            # Move the data to the device
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Forward
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # Accumulate the test loss and accuracy
            test_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            test_acc += torch.sum(preds == labels.data)

    # Print the training and test loss and accuracy
    train_loss /= len(train_data)
    test_loss /= len(test_data)
    test_acc = test_acc.double() / len(test_data)
    print(f"Epoch [{epoch + 1}/{num_epochs}] "
          f"Train Loss: {train_loss:.4f} "
          f"Test Loss: {test_loss:.4f} "
          f"Test Acc: {test_acc:.4f}")

# Save the model parameters (create the output directory first so torch.save does not fail)
os.makedirs('./model', exist_ok=True)
torch.save(model.state_dict(), './model/trained_model.pth')
```
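After training, the saved weights and `classes.txt` can be reused for inference. Below is a minimal sketch, assuming the file paths produced by the script above and a placeholder image path `test.jpg`; it rebuilds the same architecture, loads the weights, and classifies a single image with the same preprocessing used during training.

```python
# Minimal inference sketch for the model trained above.
# Assumptions: 'classes.txt' and './model/trained_model.pth' were written by the
# training script; 'test.jpg' is a placeholder path for the image to classify.
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.models import resnet50
from PIL import Image

# Same preprocessing as during training
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Read the class names written by the training script
with open('classes.txt', 'r') as f:
    classes = [line.strip() for line in f if line.strip()]

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Rebuild the architecture with the same output size, then load the trained weights
model = resnet50()
model.fc = nn.Linear(model.fc.in_features, len(classes))
model.load_state_dict(torch.load('./model/trained_model.pth', map_location=device))
model = model.to(device)
model.eval()

# Classify one image
image = Image.open('test.jpg').convert('RGB')
x = transform(image).unsqueeze(0).to(device)  # add a batch dimension
with torch.no_grad():
    logits = model(x)
    pred = logits.argmax(dim=1).item()
print(f"Predicted class: {classes[pred]}")
```

The preprocessing must match what was used at training time, otherwise the normalized inputs the network sees will differ and accuracy will drop.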