python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
class CustomDataset(Dataset):
    """Minimal Dataset wrapping in-memory features and labels."""
    def __init__(self, data, labels, transform=None):
        self.data = data
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data[idx]
        label = self.labels[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, label
# Example data
data = np.random.rand(100, 10)         # 100 samples, 10 features each
labels = np.random.randint(0, 2, 100)  # binary classification labels
# Convert to torch tensors
data = torch.tensor(data, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.long)
# Create the dataset and data loader
dataset = CustomDataset(data, labels)
dataloader = DataLoader(dataset, batch_size=16, shuffle=True)
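# Optional sanity check (illustrative addition, not in the original script):
# peek at one batch to confirm the shapes the DataLoader produces.
sample_batch, sample_labels = next(iter(dataloader))
print(sample_batch.shape, sample_labels.shape)  # torch.Size([16, 10]) torch.Size([16])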
class LogisticRegression(nn.Module):
    def __init__(self, input_dim):
        super().__init__()
        # A single linear layer; sigmoid maps the output to a probability in (0, 1)
        self.linear = nn.Linear(input_dim, 1)

    def forward(self, x):
        out = torch.sigmoid(self.linear(x))
        return out
# Hyperparameters
input_dim = data.shape[1]
num_epochs = 20
learning_rate = 0.01
# Initialize the model, loss function, and optimizer
model = LogisticRegression(input_dim)
criterion = nn.BCELoss()  # binary cross-entropy on probabilities
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
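# Note (alternative, not part of the original script): nn.BCEWithLogitsLoss is the
# numerically more stable option; it applies the sigmoid internally, so forward()
# would return self.linear(x) directly, e.g.:
#   criterion = nn.BCEWithLogitsLoss()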
# Train the model
for epoch in range(num_epochs):
    for inputs, targets in dataloader:
        # BCELoss expects float targets of shape (batch_size, 1)
        targets = targets.float().unsqueeze(1)
        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # Backward pass and optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Report the loss of the last batch in this epoch
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
# Save the model weights (state_dict only; the architecture is re-created at load time)
torch.save(model.state_dict(), 'logistic_regression_model.pth')
# Load the model
loaded_model = LogisticRegression(input_dim)
loaded_model.load_state_dict(torch.load('logistic_regression_model.pth'))
loaded_model.eval()  # switch to evaluation mode
# Make a prediction
with torch.no_grad():
    sample = torch.tensor(np.random.rand(1, 10), dtype=torch.float32)  # a new sample
    prediction = loaded_model(sample)
    print(f'Prediction: {prediction.item()}')
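    # Convert the predicted probability into a class label (illustrative addition,
    # not in the original script); 0.5 is the usual decision threshold.
    predicted_class = int(prediction.item() >= 0.5)
    print(f'Predicted class: {predicted_class}')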