Below is a **ready-to-run code example for battery remaining-useful-life (RUL) prediction with recurrent networks (RNN/LSTM/GRU)** (PyTorch version). It covers: data → sliding windows → RNN model → training → evaluation → inference. Plug in your own battery sequence features (voltage / current / temperature / capacity, etc.) and it works as-is.
1) Approach (what data you need to prepare)
A common recipe: slice each battery's time series into windows.
- Input `X`: the feature sequence over the past `seq_len` steps (shape `[seq_len, num_features]`)
- Label `y`: the RUL at the corresponding time point (remaining useful life, expressed as remaining cycles / hours / steps)
For example: with `seq_len=50`, use the past 50 cycles of features (capacity, temperature, internal resistance, ...) to predict the RUL at the current point, as in the sketch below.
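To make the shapes concrete, here is a minimal standalone sketch of the window slicing; the feature values are random placeholders, purely for illustration:

```python
import numpy as np

T, num_features, seq_len = 200, 3, 50
X = np.random.rand(T, num_features).astype(np.float32)  # stand-in feature matrix [T, F]
rul = (T - 1) - np.arange(T, dtype=np.float32)          # RUL counts down to 0 at end of life

# One training pair: the window ending at step t predicts rul[t]
t = 120
x_window = X[t - seq_len + 1 : t + 1]  # shape (50, 3)
y_target = rul[t]                      # 79.0
print(x_window.shape, y_target)
```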
2) Complete PyTorch code example (runs as-is)
```python
import math
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
# -----------------------------
# 0. Generate a synthetic "battery degradation" dataset
#    (replace this with your real data; num_features is fixed at 3 here)
# -----------------------------
def make_synthetic_battery(T=600, num_features=3, noise=0.01, seed=0):
    rng = np.random.default_rng(seed)
    # Assumed features: capacity (cap), temperature (temp), internal resistance (res)
    t = np.arange(T)
    cap = 1.0 - 0.0009 * t + 0.02 * np.sin(t / 30)  # capacity decays slowly
    temp = 25 + 2.0 * np.sin(t / 15) + 0.2 * rng.normal(size=T)
    res = 0.05 + 0.0002 * t + 0.002 * rng.normal(size=T)
    X = np.stack([cap, temp, res], axis=1)
    X += noise * rng.normal(size=X.shape)
    # Define RUL as the number of steps left until end of life (here set to T-1)
    rul = (T - 1) - t  # RUL shrinks as time goes on
    return X.astype(np.float32), rul.astype(np.float32)
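
# Illustrative sanity check of the generator's output (with the defaults above):
#   X, rul = make_synthetic_battery(T=600)
#   X.shape == (600, 3); rul[0] == 599.0; rul[-1] == 0.0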
# -----------------------------
# 1. Sliding-window Dataset: slice the series into (seq_len -> RUL at the window's last step)
# -----------------------------
class RULWindowDataset(Dataset):
    def __init__(self, X, y, seq_len=50, stride=1, normalize=True, mu=None, sigma=None):
        """
        X: [T, F]
        y: [T]
        mu/sigma: optionally pass in training-set statistics so that
                  validation/test data are normalized without leakage
        """
        assert len(X) == len(y)
        self.seq_len = seq_len
        self.stride = stride
        X = X.copy()
        y = y.copy()
        # Optional: standardize features (with training-set mean/std)
        if normalize:
            self.mu = X.mean(axis=0, keepdims=True) if mu is None else mu
            self.sigma = (X.std(axis=0, keepdims=True) + 1e-8) if sigma is None else sigma
            X = (X - self.mu) / self.sigma
        else:
            self.mu = None
            self.sigma = None
        self.X = X
        self.y = y
        # Enumerate all window start indices
        self.starts = list(range(0, len(X) - seq_len + 1, stride))

    def __len__(self):
        return len(self.starts)

    def __getitem__(self, idx):
        s = self.starts[idx]
        e = s + self.seq_len
        x_seq = self.X[s:e]       # [seq_len, F]
        y_target = self.y[e - 1]  # predict RUL at the last step of the window
        return torch.from_numpy(x_seq), torch.tensor([y_target], dtype=torch.float32)
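
# Window bookkeeping: with T time steps, window length seq_len and stride s,
# the dataset yields floor((T - seq_len) / s) + 1 samples,
# e.g. T=600, seq_len=50, s=1 -> 551 windows.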
# -----------------------------
# 2. RNN model (LSTM / GRU recommended)
# -----------------------------
class RULPredictor(nn.Module):
    def __init__(self, num_features, hidden_size=64, num_layers=2,
                 rnn_type="lstm", dropout=0.1):
        super().__init__()
        rnn_type = rnn_type.lower()
        if rnn_type == "rnn":
            self.rnn = nn.RNN(
                input_size=num_features,
                hidden_size=hidden_size,
                num_layers=num_layers,
                batch_first=True,
                dropout=dropout if num_layers > 1 else 0.0
            )
        elif rnn_type == "gru":
            self.rnn = nn.GRU(
                input_size=num_features,
                hidden_size=hidden_size,
                num_layers=num_layers,
                batch_first=True,
                dropout=dropout if num_layers > 1 else 0.0
            )
        else:  # lstm
            self.rnn = nn.LSTM(
                input_size=num_features,
                hidden_size=hidden_size,
                num_layers=num_layers,
                batch_first=True,
                dropout=dropout if num_layers > 1 else 0.0
            )
        # Regression head: regress RUL from the last time step's hidden state
        self.head = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, 1)
        )

    def forward(self, x):
        """
        x: [B, seq_len, F]
        """
        out, _ = self.rnn(x)  # out: [B, seq_len, H]
        last = out[:, -1, :]  # [B, H]
        y = self.head(last)   # [B, 1]
        return y
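
# Shape check (illustrative): a batch of 8 windows of length 50 with 3 features
#   RULPredictor(num_features=3)(torch.randn(8, 50, 3)).shape == torch.Size([8, 1])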
# -----------------------------
# 3. Training & evaluation
# -----------------------------
def rmse(pred, true):
    return math.sqrt(((pred - true) ** 2).mean())

def train_one_epoch(model, loader, optimizer, loss_fn, device):
    model.train()
    total_loss = 0.0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        pred = model(x)
        loss = loss_fn(pred, y)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # guard against exploding gradients
        optimizer.step()
        total_loss += loss.item() * x.size(0)
    return total_loss / len(loader.dataset)  # dataset-level mean loss

@torch.no_grad()
def evaluate(model, loader, loss_fn, device):
    model.eval()
    preds, trues = [], []
    total_loss = 0.0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        pred = model(x)
        loss = loss_fn(pred, y)
        total_loss += loss.item() * x.size(0)
        preds.append(pred.cpu().numpy())
        trues.append(y.cpu().numpy())
    preds = np.concatenate(preds, axis=0).squeeze()
    trues = np.concatenate(trues, axis=0).squeeze()
    return total_loss / len(loader.dataset), rmse(preds, trues)
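
# Note: rmse() works on plain numpy arrays, e.g.
#   rmse(np.array([3.0, 1.0]), np.array([1.0, 1.0])) == math.sqrt(2.0)  # about 1.414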
def main():
    # ------- Hyperparameters -------
    seq_len = 50
    batch_size = 64
    lr = 1e-3
    epochs = 20
    rnn_type = "lstm"  # "rnn" / "gru" / "lstm"

    # ------- Data (synthetic example) -------
    X, y = make_synthetic_battery(T=800, num_features=3)
    T = len(X)

    # Simple split: first 70% train, last 30% test
    # (in a real project, split by battery ID to prevent leakage)
    split = int(T * 0.7)
    X_train, y_train = X[:split], y[:split]
    X_test, y_test = X[split - seq_len + 1:], y[split - seq_len + 1:]  # keep full windows available

    train_ds = RULWindowDataset(X_train, y_train, seq_len=seq_len, stride=1, normalize=True)
    # Normalize the test set with training-set statistics to avoid leakage
    test_ds = RULWindowDataset(X_test, y_test, seq_len=seq_len, stride=1,
                               normalize=True, mu=train_ds.mu, sigma=train_ds.sigma)

    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, drop_last=False)
    test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=False, drop_last=False)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = RULPredictor(num_features=X.shape[1], hidden_size=64, num_layers=2, rnn_type=rnn_type).to(device)
    loss_fn = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # ------- Training -------
    for ep in range(1, epochs + 1):
        train_loss = train_one_epoch(model, train_loader, optimizer, loss_fn, device)
        test_loss, test_rmse = evaluate(model, test_loader, loss_fn, device)
        print(f"Epoch {ep:02d} | train_loss={train_loss:.4f} | test_loss={test_loss:.4f} | test_RMSE={test_rmse:.2f}")

    # ------- Inference: predict RUL from one window -------
    model.eval()
    x0, y0 = test_ds[0]
    with torch.no_grad():
        pred0 = model(x0.unsqueeze(0).to(device)).item()
    print(f"\nExample inference: true RUL={y0.item():.1f}, predicted RUL={pred0:.1f}")

if __name__ == "__main__":
    main()
```
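
The split in `main()` is a single-cell, time-based split. As the comment there notes, real projects should split by battery ID so that no cell contributes windows to both sets. Here is a minimal sketch of that idea, reusing the `RULWindowDataset` above; the `cells` dict, `build_datasets_by_cell`, and the battery IDs are hypothetical placeholders for however you load your data:

```python
from torch.utils.data import ConcatDataset

# cells: hypothetical dict mapping battery ID -> (X_i, y_i) numpy arrays,
# e.g. {"B0005": (X1, y1), "B0006": (X2, y2), ...}
def build_datasets_by_cell(cells, train_ids, test_ids, seq_len=50):
    train_parts = [RULWindowDataset(*cells[cid], seq_len=seq_len) for cid in train_ids]
    # One simple choice: reuse the first training cell's statistics for the test cells;
    # pooling mean/std over all training cells is also common.
    mu, sigma = train_parts[0].mu, train_parts[0].sigma
    test_parts = [RULWindowDataset(*cells[cid], seq_len=seq_len, mu=mu, sigma=sigma)
                  for cid in test_ids]
    return ConcatDataset(train_parts), ConcatDataset(test_parts)
```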