- ResNet classifier training
```python
import os

import torch
import torchvision
from torchvision import transforms
from torch.utils.data import random_split
import torch.nn as nn
import torch.optim as optim
from torchvision.models import resnet50

# Define the transformation
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# Load the dataset
data = torchvision.datasets.ImageFolder(root=r"D:\train_model\train_data_set", transform=transform)
classes_set = data.classes

# Save the class names to classes.txt
with open('classes.txt', 'w') as f:
    for class_name in classes_set:
        f.write(class_name + '\n')

# Split the data into train and test sets
train_size = int(0.8 * len(data))
test_size = len(data) - train_size
train_data, test_data = random_split(data, [train_size, test_size])

# Load the train and test data into data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=False)

# Define the model
model = resnet50(pretrained=True)

# Replace the last layer with a classification head for our classes
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, len(classes_set))

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Move the model to the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Define the number of epochs
num_epochs = 10

# Train the model
for epoch in range(num_epochs):
    # Train the model on the training set
    model.train()
    train_loss = 0.0
    for inputs, labels in train_loader:
        # Move the data to the device
        inputs = inputs.to(device)
        labels = labels.to(device)

        # Zero the parameter gradients
        optimizer.zero_grad()

        # Forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Update the training loss
        train_loss += loss.item() * inputs.size(0)

    # Evaluate the model on the test set
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    with torch.no_grad():
        for inputs, labels in test_loader:
            # Move the data to the device
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Forward
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # Update the test loss and accuracy
            test_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            test_acc += torch.sum(preds == labels.data)

    # Print the training and test loss and accuracy
    train_loss /= len(train_data)
    test_loss /= len(test_data)
    test_acc = test_acc.double() / len(test_data)
    print(f"Epoch [{epoch + 1}/{num_epochs}] Train Loss: {train_loss:.4f} Test Loss: {test_loss:.4f} Test Acc: {test_acc:.4f}")

# Save the model parameters (make sure the output directory exists first)
os.makedirs('./model', exist_ok=True)
torch.save(model.state_dict(), './model/trained_model.pth')
```
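For completeness, here is a minimal inference sketch that reloads the saved checkpoint and `classes.txt` to classify a single image. The image path `test.jpg` is a placeholder, and the checkpoint and class-file paths simply mirror the ones used in the training script above; adjust them to your setup.

```python
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.models import resnet50
from PIL import Image

# Load the class names saved during training
with open('classes.txt', 'r') as f:
    classes_set = [line.strip() for line in f if line.strip()]

# Rebuild the architecture with the same classification head, then load the trained weights
model = resnet50()  # no pretrained weights needed here; we load our own checkpoint below
model.fc = nn.Linear(model.fc.in_features, len(classes_set))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.load_state_dict(torch.load('./model/trained_model.pth', map_location=device))
model = model.to(device)
model.eval()

# Same preprocessing as in training
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# 'test.jpg' is a placeholder path for the image to classify
image = Image.open('test.jpg').convert('RGB')
inputs = transform(image).unsqueeze(0).to(device)  # add the batch dimension

with torch.no_grad():
    outputs = model(inputs)
    probs = torch.softmax(outputs, dim=1)
    conf, pred = torch.max(probs, 1)

print(f"Predicted: {classes_set[pred.item()]} (confidence {conf.item():.4f})")
```

Note that on newer torchvision releases the `pretrained=True` argument used in the training script is deprecated; the equivalent is `resnet50(weights=torchvision.models.ResNet50_Weights.IMAGENET1K_V1)`.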