import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Hyperparameters (assumed typical CartPole PPO settings; adjust to taste)
learning_rate = 0.0005
gamma         = 0.98
lmbda         = 0.95
eps_clip      = 0.1
K_epochs      = 3
T_horizon     = 20

class PPO(nn.Module):
    def __init__(self):
        super(PPO, self).__init__()
        self.data = []
        self.fc1 = nn.Linear(4, 256)    # shared feature layer (CartPole state is 4-dim)
        self.fc_pi = nn.Linear(256, 2)  # policy head (Actor)
        self.fc_v = nn.Linear(256, 1)   # value head (Critic)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
    def pi(self, x, softmax_dim=0):
        # Action probabilities from the policy head
        x = F.relu(self.fc1(x))
        x = self.fc_pi(x)
        probs = F.softmax(x, dim=softmax_dim)
        return probs

    def v(self, x):
        # State-value estimate from the critic head
        x = F.relu(self.fc1(x))
        v = self.fc_v(x)
        return v
    def put_data(self, transition):
        self.data.append(transition)

    def make_batch(self):
        s_lst, a_lst, r_lst, s_prime_lst, prob_a_lst, done_lst = [], [], [], [], [], []
        for transition in self.data:
            s, a, r, s_prime, prob_a, done = transition
            s_lst.append(s); a_lst.append([a]); r_lst.append([r])
            s_prime_lst.append(s_prime); prob_a_lst.append([prob_a]); done_lst.append([done])
        s, a, r, s_prime, done, prob_a = torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
                                         torch.tensor(r_lst), torch.tensor(s_prime_lst, dtype=torch.float), \
                                         torch.tensor(done_lst, dtype=torch.float), torch.tensor(prob_a_lst)
        self.data = []
        return s, a, r, s_prime, done, prob_a
    def train_net(self):
        s, a, r, s_prime, done, prob_a = self.make_batch()

        for i in range(K_epochs):
            # TD target and advantage (simplified GAE over one rollout)
            td_target = r + gamma * self.v(s_prime) * (1 - done)
            delta = td_target - self.v(s)
            delta = delta.detach().numpy()

            # Backward recursion: A_t = delta_t + gamma * lmbda * A_{t+1}
            advantage_lst = []
            adv = 0.0
            for delta_t in delta[::-1]:
                adv = gamma * lmbda * adv + delta_t[0]
                advantage_lst.append([adv])
            advantage_lst.reverse()
            advantage = torch.tensor(advantage_lst, dtype=torch.float)

            # Probability ratio pi_theta(a|s) / pi_theta_old(a|s),
            # computed in log space for numerical stability
            pi = self.pi(s, softmax_dim=1)
            pi_a = pi.gather(1, a)
            ratio = torch.exp(torch.log(pi_a) - torch.log(prob_a))

            # PPO clipped surrogate objective plus critic (value) loss
            surr1 = ratio * advantage
            surr2 = torch.clamp(ratio, 1 - eps_clip, 1 + eps_clip) * advantage
            loss = -torch.min(surr1, surr2) + F.smooth_l1_loss(self.v(s), td_target.detach())

            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()
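
A quick sanity check on the advantage computation: the backward loop in train_net is meant to compute the discounted sum A_t = sum_k (gamma*lmbda)^k * delta_{t+k}. The standalone sketch below verifies this against the closed form, using made-up delta values (illustrative only, not part of the training code):

# Standalone check of the backward GAE recursion used in train_net().
# The delta values here are made up for illustration.
gamma, lmbda = 0.98, 0.95
deltas = [1.0, 0.5, -0.2]  # pretend per-step TD errors delta_t

adv, advantage_lst = 0.0, []
for delta_t in reversed(deltas):  # same backward recursion as train_net()
    adv = gamma * lmbda * adv + delta_t
    advantage_lst.append(adv)
advantage_lst.reverse()

# Closed form: A_t = sum_k (gamma*lmbda)^k * delta_{t+k}
closed = [sum((gamma * lmbda) ** k * deltas[t + k] for k in range(len(deltas) - t))
          for t in range(len(deltas))]
print(advantage_lst)  # matches `closed` up to floating-point rounding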
Main loop
# --- Main loop ---
def main():
    env = gym.make('CartPole-v1')
    model = PPO()
    score = 0.0

    for n_epi in range(1000):
        # Gym >= 0.26 returns (obs, info) from reset(); older versions return obs only
        reset_result = env.reset()
        s = reset_result[0] if isinstance(reset_result, tuple) else reset_result
        done = False
        while not done:
            for t in range(T_horizon):
                prob = model.pi(torch.from_numpy(s).float())
                m = torch.distributions.Categorical(prob)
                a = m.sample().item()

                step_result = env.step(a)
                if len(step_result) == 5:  # Gym >= 0.26 step API
                    s_prime, r, terminated, truncated, info = step_result
                    done = terminated or truncated
                else:                      # legacy step API
                    s_prime, r, done, info = step_result

                # Reward is scaled down to keep value targets small
                model.put_data((s, a, r / 100.0, s_prime, prob[a].item(), done))
                s = s_prime
                score += r
                if done:
                    break
            model.train_net()

        if n_epi % 20 == 0 and n_epi != 0:
            print(f"# Episode: {n_epi}, Avg Score: {score / 20:.1f}")
            score = 0.0

    env.close()

if __name__ == '__main__':
    main()
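
To build intuition for the clipped objective, the standalone sketch below evaluates min(surr1, surr2) for a few hand-picked ratio values (illustrative only, not taken from a real rollout) with a positive advantage:

import torch

eps_clip = 0.1  # example clip range, for illustration
ratio = torch.tensor([0.5, 0.95, 1.0, 1.05, 1.5])
advantage = torch.ones_like(ratio)  # positive advantage for every sample

surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1 - eps_clip, 1 + eps_clip) * advantage
print(torch.min(surr1, surr2))
# tensor([0.5000, 0.9500, 1.0000, 1.0500, 1.1000])
# With A > 0, the objective stops growing once the ratio exceeds 1 + eps_clip,
# so there is no incentive to move the new policy far from the old one.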