I. Algorithm Principles and Core Ideas
1. Model-Based Reinforcement Learning
Model-based reinforcement learning (Model-Based RL) builds a dynamics model of the environment and uses it for trajectory planning and policy optimization in simulation. Compared with model-free methods, it offers the following advantages (a minimal sketch of the generic model-based loop follows this list):
- Data efficient: far fewer interactions with the real environment are required
- Strong planning ability: long-horizon decisions can be made by rolling out the model
- Safe and controllable: high-risk actions can be tested inside the learned model first
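Before diving into PETS, the sketch below illustrates the generic model-based RL loop: learn a dynamics model from collected transitions, plan actions inside that model, and feed the resulting experience back into the dataset. It is a minimal illustration only; `fit_dynamics_model` and `plan_with_model` are placeholder names, not functions from the implementation later in this article.

```python
# Minimal sketch of the generic model-based RL loop.
# `fit_dynamics_model` and `plan_with_model` are placeholders for the
# model-learning and planning components (PETS instantiates them below).
def model_based_rl_loop(env, num_iterations):
    dataset = []  # (state, action, next_state) transitions
    for _ in range(num_iterations):
        model = fit_dynamics_model(dataset)          # 1. learn the environment dynamics
        state, _ = env.reset()
        done = False
        while not done:
            action = plan_with_model(model, state)   # 2. plan inside the learned model
            next_state, reward, terminated, truncated, _ = env.step(action)
            dataset.append((state, action, next_state))  # 3. grow the dataset
            state = next_state
            done = terminated or truncated
    return model
```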
2. The Core of PETS
PETS (Probabilistic Ensembles with Trajectory Sampling) achieves efficient planning by combining a probabilistic ensemble model with cross-entropy optimization (the predictive distribution behind the ensemble is written out right after this list):
- Probabilistic ensemble model: several neural networks jointly model the environment dynamics and capture uncertainty
- Trajectory optimization: the cross-entropy method (CEM) searches for the best action sequence
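Concretely, each ensemble member predicts a Gaussian distribution over the next state, which is why every network in the code below outputs 2 × state_dim values (a mean and a log-variance per state dimension). In the original PETS formulation the members are trained with the Gaussian negative log-likelihood; the implementation in Section III uses a simplified surrogate instead.

$$\hat{s}_{t+1} \sim \mathcal{N}\big(\mu_{\theta_i}(s_t, a_t),\ \mathrm{diag}(\sigma^2_{\theta_i}(s_t, a_t))\big)$$

$$\mathcal{L}(\theta_i) = \sum_{(s,a,s') \in \mathcal{D}} \big[\mu_{\theta_i}(s,a) - s'\big]^{\top} \Sigma_{\theta_i}^{-1}(s,a) \big[\mu_{\theta_i}(s,a) - s'\big] + \log \det \Sigma_{\theta_i}(s,a)$$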
Algorithm Advantages
Property | Description |
---|---|
High data efficiency | Data from a single environment interaction can be reused many times |
Robust to noise | The probabilistic model handles sensor noise effectively |
Suited to complex action spaces | Supports continuous control and high-dimensional action outputs |
II. PETS Implementation Steps
We use the MuJoCo HalfCheetah environment as the running example to walk through a PETS implementation:
1. Problem Description
- Goal: control a simulated cheetah robot to run forward as fast as possible
- State space: a 17-dimensional vector (joint angles, velocities, etc.)
- Action space: a 6-dimensional continuous vector (joint torques); see the snippet below to verify these dimensions
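These dimensions can be checked directly, as the short snippet below does (it assumes gymnasium[mujoco] is installed; see the notes at the end of the article):

```python
import gymnasium as gym

# Quick check of the HalfCheetah-v5 space dimensions quoted above
# (assumes gymnasium[mujoco] is installed).
env = gym.make("HalfCheetah-v5")
print(env.observation_space.shape)   # (17,)
print(env.action_space.shape)        # (6,)
print(env.action_space.low, env.action_space.high)  # torques bounded in [-1, 1]
env.close()
```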
2. Implementation Steps
- Build the probabilistic ensemble model (5 independent neural networks)
- Implement a trajectory optimizer based on the cross-entropy method
- Design the data-collection and model-training system
- Integrate the training pipeline with the Gymnasium environment
III. Code Implementation
```python
import gymnasium as gym
import torch
import torch.nn as nn
import numpy as np
from collections import deque
import random


# ================== Configuration ==================
class PETSConfig:
    # Environment parameters
    env_name = "HalfCheetah-v5"
    max_episode_steps = 200
    # Training parameters
    batch_size = 256
    lr = 1e-3
    epochs = 400
    horizon = 30
    num_particles = 20   # reserved for particle-based trajectory sampling (not used below)
    num_ensembles = 5
    # Network architecture
    hidden_dim = 200
    num_layers = 4
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# ================== Probabilistic Ensemble Model ==================
class ProbabilisticEnsemble(nn.Module):
    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.models = nn.ModuleList([
            self._build_network(state_dim + action_dim, 2 * state_dim)
            for _ in range(PETSConfig.num_ensembles)
        ])

    def _build_network(self, in_dim, out_dim):
        layers = []
        input_dim = in_dim
        for _ in range(PETSConfig.num_layers):
            layers.extend([
                nn.Linear(input_dim, PETSConfig.hidden_dim),
                nn.SiLU(),
            ])
            input_dim = PETSConfig.hidden_dim
        layers.append(nn.Linear(input_dim, out_dim))
        return nn.Sequential(*layers)

    def forward(self, state, action):
        x = torch.cat([state, action], dim=-1)
        outputs = [model(x) for model in self.models]
        means = torch.stack([out[:, :self.state_dim] for out in outputs])
        logvars = torch.stack([out[:, self.state_dim:] for out in outputs])
        return means, logvars

    def predict(self, state, action):
        """Predict the next state (accepts NumPy inputs)."""
        # Convert to tensors
        state_tensor = torch.FloatTensor(state).to(PETSConfig.device)
        action_tensor = torch.FloatTensor(action).to(PETSConfig.device)
        # Randomly pick one ensemble member
        model_idx = np.random.randint(0, PETSConfig.num_ensembles)
        model = self.models[model_idx]
        # Forward pass
        with torch.no_grad():
            output = model(torch.cat([state_tensor, action_tensor], dim=-1).unsqueeze(0))
            mean = output[:, :self.state_dim]
            logvar = output[:, self.state_dim:]
            # Sample from the predicted Gaussian
            std = torch.exp(0.5 * logvar)
            epsilon = torch.randn_like(std)
            next_state = mean + epsilon * std
        return next_state.cpu().numpy().squeeze(0)  # back to NumPy, drop the batch dimension


# ================== Trajectory Optimizer ==================
class TrajectoryOptimizer:
    def __init__(self, model, state_dim, action_dim):
        self.model = model
        self.model.eval()  # evaluation mode
        self.state_dim = state_dim
        self.action_dim = action_dim

    def optimize(self, initial_state):
        # Cross-entropy method (CEM)
        best_actions = np.random.uniform(-1, 1, size=(PETSConfig.horizon, self.action_dim))
        for _ in range(5):  # number of CEM iterations
            # Generate candidate action sequences around the current best sequence
            noise = np.random.normal(scale=0.2, size=(100, PETSConfig.horizon, self.action_dim))
            candidates = np.clip(best_actions + noise, -1, 1)
            # Evaluate candidates with the learned model
            returns = []
            for action_seq in candidates:
                total_reward = self._evaluate_sequence(initial_state, action_seq)
                returns.append(total_reward)
            # Keep the elite samples and refit the mean
            elite_idx = np.argsort(returns)[-10:]
            best_actions = np.mean(candidates[elite_idx], axis=0)
        return best_actions[0]  # execute only the first action of the best sequence

    def _evaluate_sequence(self, state, action_seq):
        total_reward = 0
        current_state = state.copy()
        for action in action_seq:
            next_state = self.model.predict(current_state, action)
            # Use a reward function that matches your task (ideally the environment's own reward)
            reward = self._get_reward(current_state, action, next_state)
            total_reward += reward
            current_state = next_state
        return total_reward

    def _get_reward(self, state, action, next_state):
        """Example: a simplified reward for HalfCheetah."""
        # Replace this with the environment's true reward computation if possible.
        # The example below only tracks forward velocity.
        forward_vel = next_state[8]  # assumes index 8 (the 9th entry) is the forward velocity
        return forward_vel  # maximize forward velocity


# ================== Training System ==================
class PETSTrainer:
    def __init__(self):
        # Initialize the Gymnasium environment
        self.env = gym.make(
            PETSConfig.env_name,
            max_episode_steps=PETSConfig.max_episode_steps
        )
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.shape[0]
        # Initialize the model
        self.model = ProbabilisticEnsemble(self.state_dim, self.action_dim).to(PETSConfig.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=PETSConfig.lr)
        self.trajectory_optimizer = TrajectoryOptimizer(self.model, self.state_dim, self.action_dim)
        # Replay buffer of (state, action, next_state) transitions
        self.buffer = deque(maxlen=100000)

    def collect_data(self, num_episodes=50):
        """Collect initial data with a random policy."""
        for _ in range(num_episodes):
            obs, _ = self.env.reset()
            done = False
            while not done:
                action = self.env.action_space.sample()
                next_obs, reward, terminated, truncated, _ = self.env.step(action)
                self.buffer.append((obs, action, next_obs))
                obs = next_obs
                done = terminated or truncated

    def train_model(self):
        """Train the probabilistic ensemble on one sampled mini-batch."""
        states, actions, next_states = zip(*random.sample(self.buffer, PETSConfig.batch_size))
        states = torch.FloatTensor(np.array(states)).to(PETSConfig.device)
        actions = torch.FloatTensor(np.array(actions)).to(PETSConfig.device)
        next_states = torch.FloatTensor(np.array(next_states)).to(PETSConfig.device)
        self.optimizer.zero_grad()
        means, logvars = self.model(states, actions)
        # Simplified objective: squared prediction error plus a variance penalty
        loss = (means - next_states).pow(2).mean() + 0.5 * logvars.exp().mean()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def evaluate(self, num_episodes=5):
        """Evaluate the planning policy in the real environment."""
        total_rewards = []
        for _ in range(num_episodes):
            obs, _ = self.env.reset()
            episode_reward = 0
            done = False
            while not done:
                action = self.trajectory_optimizer.optimize(obs)
                obs, reward, terminated, truncated, _ = self.env.step(action)
                episode_reward += reward
                done = terminated or truncated
            total_rewards.append(episode_reward)
        return np.mean(total_rewards)


# ================== Main ==================
if __name__ == "__main__":
    trainer = PETSTrainer()
    # Phase 1: collect initial data
    print("Collecting initial data...")
    trainer.collect_data(num_episodes=100)
    # Phase 2: alternate model training and evaluation
    for epoch in range(PETSConfig.epochs):
        # Model training
        loss = trainer.train_model()
        # Policy evaluation
        if (epoch + 1) % 20 == 0:
            avg_reward = trainer.evaluate()
            print(f"Epoch {epoch + 1:04d} | Loss: {loss:.2f} | Avg Reward: {avg_reward:.1f}")
    # Save the final model
    # torch.save(trainer.model.state_dict(), "pets_model.pth")
```
IV. Code Walkthrough
1. Probabilistic Ensemble Model
```python
class ProbabilisticEnsemble(nn.Module):
    def __init__(self, state_dim, action_dim):
        # Build 5 independent forward-prediction networks
        self.models = nn.ModuleList([...])

    def predict(self, state, action):
        # Randomly pick one ensemble member for the prediction
        model = self.models[np.random.randint(PETSConfig.num_ensembles)]
        # Sample a noisy next state from the predicted Gaussian
        return mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)
```
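A useful property of the ensemble is that it separates two kinds of uncertainty: disagreement between the members reflects epistemic uncertainty (lack of data), while each member's predicted variance reflects aleatoric noise. The hedged sketch below shows how both could be read off the `forward` output defined earlier; it is an optional diagnostic, not part of the implementation in Section III.

```python
import torch

# Sketch: uncertainty decomposition from the ensemble defined above.
# `model` is a ProbabilisticEnsemble; `state` and `action` are batched tensors.
def uncertainty_decomposition(model, state, action):
    with torch.no_grad():
        means, logvars = model(state, action)    # shapes: (num_ensembles, batch, state_dim)
    epistemic = means.var(dim=0)                 # disagreement across ensemble members
    aleatoric = torch.exp(logvars).mean(dim=0)   # average predicted noise variance
    return epistemic, aleatoric
```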
2. Trajectory Optimizer
```python
class TrajectoryOptimizer:
    def optimize(self, initial_state):
        # Core loop of the cross-entropy method
        for _ in range(5):  # iterative refinement
            candidates = best_actions + noise  # generate candidate action sequences
            returns = [self._evaluate_sequence(initial_state, seq) for seq in candidates]
            best_actions = candidates[np.argsort(returns)[-10:]].mean(axis=0)  # refit on elites
        return best_actions[0]  # return the first action of the best sequence
```
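Note that this simplified optimizer scores each candidate sequence with a single stochastic rollout. The original PETS algorithm instead averages returns over several sampled rollouts ("particles"), which is presumably what the unused num_particles field in PETSConfig is reserved for. A hedged sketch of such a variant of `_evaluate_sequence` (not part of the Section III code) could look like this:

```python
# Sketch: particle-based evaluation of an action sequence, closer to the
# trajectory sampling in the original PETS paper. Intended as an alternative
# TrajectoryOptimizer method; not part of the implementation in Section III.
def _evaluate_sequence_with_particles(self, state, action_seq,
                                      num_particles=PETSConfig.num_particles):
    returns = []
    for _ in range(num_particles):
        total_reward = 0.0
        current_state = state.copy()
        for action in action_seq:
            next_state = self.model.predict(current_state, action)  # stochastic rollout
            total_reward += self._get_reward(current_state, action, next_state)
            current_state = next_state
        returns.append(total_reward)
    return float(np.mean(returns))  # average return over particles
```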
3. Training System
```python
class PETSTrainer:
    def train_model(self):
        # Sample a mini-batch from the buffer
        states, actions, next_states = zip(*random.sample(self.buffer, PETSConfig.batch_size))
        # Two-term loss: prediction error + variance regularizer
        loss = (means - next_states).pow(2).mean() + 0.5 * logvars.exp().mean()
```
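This objective is a simplified surrogate: the squared error trains the means, while the exp(logvar) term merely keeps the predicted variances small. The original PETS paper trains each member with the Gaussian negative log-likelihood shown in Section I; a hedged drop-in sketch:

```python
import torch

# Sketch: Gaussian negative log-likelihood (up to constants), the loss used in
# the original PETS formulation, as an alternative to the simplified loss above.
def gaussian_nll_loss(means, logvars, next_states):
    inv_var = torch.exp(-logvars)
    return (((means - next_states) ** 2) * inv_var + logvars).mean()
```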
V. Results and Optimization Suggestions
1. Typical Training Log
```text
Epoch 0020 | Loss: 13.34 | Avg Reward: -34.7
Epoch 0040 | Loss: 12.66 | Avg Reward: -55.4
Epoch 0060 | Loss: 10.36 | Avg Reward: -69.6
Epoch 0080 | Loss: 7.20 | Avg Reward: -50.7
Epoch 0100 | Loss: 5.40 | Avg Reward: -58.4
Epoch 0120 | Loss: 4.18 | Avg Reward: -23.0
Epoch 0140 | Loss: 3.30 | Avg Reward: -69.8
Epoch 0160 | Loss: 2.93 | Avg Reward: -34.4
Epoch 0180 | Loss: 2.18 | Avg Reward: -58.6
Epoch 0200 | Loss: 2.01 | Avg Reward: -63.3
Epoch 0220 | Loss: 1.81 | Avg Reward: -71.7
Epoch 0240 | Loss: 1.71 | Avg Reward: -47.7
Epoch 0260 | Loss: 1.56 | Avg Reward: -32.1
Epoch 0280 | Loss: 1.53 | Avg Reward: -63.4
Epoch 0300 | Loss: 1.38 | Avg Reward: -49.7
Epoch 0320 | Loss: 1.44 | Avg Reward: -34.3
Epoch 0340 | Loss: 1.50 | Avg Reward: -59.0
Epoch 0360 | Loss: 1.49 | Avg Reward: -42.7
Epoch 0380 | Loss: 1.33 | Avg Reward: -47.1
Epoch 0400 | Loss: 1.40 | Avg Reward: -36.3
```
2. Performance Bottleneck Analysis
Symptom | Likely Cause | Suggested Fix |
---|---|---|
Loss decreases but return does not improve | Poorly designed reward function | Add shaping terms such as a survival bonus (see the reward sketch below) |
Returns fluctuate heavily early on | Insufficient exploration | Increase the CEM noise scale |
Returns plateau later in training | Insufficient model capacity | Add network layers and hidden units |
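Since the reward design is the most likely culprit in the first row, one option is to move `_get_reward` closer to the reward documented for HalfCheetah-v5 (forward velocity minus a control cost). The sketch below is a hedged replacement; it keeps the assumption from Section III that index 8 of the observation is the forward velocity.

```python
import numpy as np

# Sketch: a reward closer to HalfCheetah-v5's documented reward
# (forward_reward - ctrl_cost), intended as a replacement for _get_reward.
def _get_reward(self, state, action, next_state):
    forward_vel = next_state[8]                   # assumed forward velocity (see Section III)
    ctrl_cost = 0.1 * np.sum(np.square(action))   # 0.1 is the default ctrl_cost_weight
    return forward_vel - ctrl_cost
```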
3. Key Tuning Suggestions
```python
class ImprovedConfig(PETSConfig):
    num_ensembles = 7   # more ensemble members
    horizon = 40        # longer planning horizon
    batch_size = 512    # larger batch size
    hidden_dim = 256    # more network capacity
```
VI. Summary and Extensions
This article implemented model-based reinforcement learning with the PETS algorithm and demonstrated its data efficiency on a continuous-control task. Readers are encouraged to try the following extensions:
- Use a more expressive dynamics model (e.g., an LSTM)
- Test the algorithm on a real robotic task
- Incorporate offline data to improve model accuracy
In the next article, we will explore meta reinforcement learning (Meta-RL) and implement the MAML algorithm!
Notes:
- Install the dependencies before running:
```bash
pip install gymnasium[mujoco] torch
```
- MuJoCo license: gymnasium[mujoco] installs the open-source mujoco package (version 2.1+), which requires no license key; only legacy mujoco-py setups need `mkdir -p ~/.mujoco && cp mjkey.txt ~/.mujoco/`.
- A GPU is recommended for training (RTX 3060 or better).
I hope this article helps you grasp the core methods of model-based reinforcement learning! If you run into problems, feel free to discuss them in the comments.