Reinforcement Learning: the REINFORCE Policy Gradient Algorithm, Using the CartPole Environment as an Example
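
REINFORCE is a Monte Carlo policy gradient method: it runs complete episodes with the current policy $\pi_\theta$ and then updates the parameters using the standard (textbook) gradient estimator

$$
\nabla_\theta J(\theta)=\mathbb{E}_{\pi_\theta}\Big[\sum_{t=0}^{T-1} G_t\,\nabla_\theta \log \pi_\theta(a_t\mid s_t)\Big],
\qquad
G_t=\sum_{k=t}^{T-1}\gamma^{\,k-t}\,r_{k+1},
$$

where $G_t$ is the discounted return from step $t$. The update method in the code below implements exactly this estimator: it computes $G_t$ backwards through the stored trajectory and accumulates the gradient of the per-step loss $-G_t \log \pi_\theta(a_t\mid s_t)$ before taking a single optimizer step.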

The complete code is as follows:

import gym
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
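# moving_average smooths the return curve for plotting: the middle of the output uses a
# fixed sliding window of length window_size, while the two ends use growing/shrinking
# windows so the smoothed curve has the same length as the input array.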
def moving_average(a, window_size):
    cumulative_sum = np.cumsum(np.insert(a, 0, 0)) 
    middle = (cumulative_sum[window_size:] - cumulative_sum[:-window_size]) / window_size
    r = np.arange(1, window_size-1, 2)
    begin = np.cumsum(a[:window_size-1])[::2] / r
    end = (np.cumsum(a[:-window_size:-1])[::2] / r)[::-1]
    return np.concatenate((begin, middle, end))
class PolicyNetwork(torch.nn.Module):
    def __init__(self,statedim,hiddendim,actiondim):
        super(PolicyNetwork,self).__init__()
        self.cf1=torch.nn.Linear(statedim,hiddendim)
        self.cf2=torch.nn.Linear(hiddendim,actiondim)
    def forward(self,x):
        x=torch.nn.functional.relu(self.cf1(x))
        return torch.nn.functional.softmax(self.cf2(x),dim=1)
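# The policy network maps a batch of states of shape (batch, statedim) through one hidden
# ReLU layer to action probabilities of shape (batch, actiondim); the softmax over dim=1
# makes each row a valid probability distribution over the discrete actions.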
class REINFORCE:
    def __init__(self,statedim,hiddendim,actiondim,learningrate,gamma,device):
        self.policynet=PolicyNetwork(statedim,hiddendim,actiondim).to(device)
        self.gamma=gamma
        self.device=device
        self.optimizer=torch.optim.Adam(self.policynet.parameters(),lr=learningrate)
    def takeaction(self,state):
        state=torch.tensor([state],dtype=torch.float).to(self.device)
        probs=self.policynet(state)
        actiondist=torch.distributions.Categorical(probs)  # torch.distributions.Categorical represents a categorical (discrete) distribution over the action probabilities; actiondist.sample() draws one action index from it
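        # For example (hypothetical numbers): with probs = [[0.8, 0.2]], sample() returns
        # action 0 with probability 0.8 and action 1 with probability 0.2.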
        action=actiondist.sample()
        return action.item()
    def update(self,transitiondist):
        statelist=transitiondist['states']
        rewardlist=transitiondist['rewards']
        actionlist=transitiondist['actions']
        G=0
        self.optimizer.zero_grad()
        for i in reversed(range(len(rewardlist))):  # iterate over the trajectory from the last step backwards
            reward=rewardlist[i]
            state=statelist[i]
            action=actionlist[i]
            state=torch.tensor([state],dtype=torch.float).to(self.device)
            action=torch.tensor([action]).view(-1,1).to(self.device)
            logprob=torch.log(self.policynet(state).gather(1,action))  # .gather(1, action) selects, along dimension 1 (the action dimension), the probability of the action that was actually taken
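            # For example (hypothetical numbers): if the network outputs [[0.8, 0.2]] and
            # action = [[1]], then .gather(1, action) returns [[0.2]].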
            G=self.gamma*G+reward  # G_t = reward_t + gamma * G_{t+1}, the discounted return from step t
            loss=-logprob*G  # per-step loss: negative log-probability weighted by the return G_t
            loss.backward()  # backpropagate; gradients accumulate across the steps of the episode
        self.optimizer.step()  # apply one gradient step per episode (gradient descent on the negative objective)
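        # Note: because loss.backward() is called once per time step without zeroing the
        # gradients in between, the per-step gradients of -G_t * log pi(a_t|s_t) accumulate,
        # and the single optimizer.step() above applies their sum, i.e. the Monte Carlo
        # policy gradient for the whole episode.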

learningrate=4e-3
episodesnum=1000
hiddendim=128
gamma=0.99
pbarnum=10
printreturnnum=10
device=torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
env=gym.make('CartPole-v1')
env.reset(seed=880)  # seed the environment once here for reproducibility
torch.manual_seed(880)
statedim=env.observation_space.shape[0]
actiondim=env.action_space.n
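# For CartPole-v1, statedim is 4 (cart position, cart velocity, pole angle,
# pole angular velocity) and actiondim is 2 (push the cart left or right).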
agent=REINFORCE(statedim=statedim,hiddendim=hiddendim,actiondim=actiondim,learningrate=learningrate,gamma=gamma,device=device)
returnlist=[]
for k in range(pbarnum):
    with tqdm(total=int(episodesnum/pbarnum),desc='Iteration %d'%k) as pbar:
        for episode in range(int(episodesnum/pbarnum)):
            g=0
            transitiondist={'states':[],'actions':[],'nextstates':[],'rewards':[]}
            state,_=env.reset()  # do not re-seed here: the environment was seeded once above, so initial states vary across episodes
            done=False
            while not done:
                action=agent.takeaction(state)
                nextstate,reward,terminated,truncated,_=env.step(action)
                done=terminated or truncated
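                # The episode ends when the pole falls (terminated) or when the time limit is
                # reached (truncated); CartPole-v1 truncates after 500 steps, so 500 is the
                # maximum possible return.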

                transitiondist['states'].append(state)
                transitiondist['actions'].append(action)
                transitiondist['nextstates'].append(nextstate)
                transitiondist['rewards'].append(reward)
                state=nextstate
                g=g+reward
            returnlist.append(g)
            agent.update(transitiondist)
            if (episode+1)%(printreturnnum)==0:
                pbar.set_postfix({'Episode':'%d'%(episodesnum//pbarnum*k+episode+1),'Return':'%.3f'%np.mean(returnlist[-printreturnnum:])})
            pbar.update(1)

episodelist=list(range(len(returnlist)))
plt.plot(episodelist,returnlist)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('REINFORCE on {}'.format(env.spec.name))     
plt.show()
mvreturn=moving_average(returnlist,9)
plt.plot(episodelist,mvreturn)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('REINFORCE on {}'.format(env.spec.name))
plt.show()  

                
            

Results: the script produces two plots, the raw per-episode return curve and its moving-average (window size 9) smoothed version.
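
If you want to watch the learned policy after training, a minimal evaluation sketch along the following lines should work (this is an addition, not part of the original script; it reuses the agent and device objects defined above and picks the most probable action greedily instead of sampling):

# Evaluation sketch (assumes the training script above has already run).
evalenv=gym.make('CartPole-v1',render_mode='human')
state,_=evalenv.reset()
done=False
evalreturn=0
while not done:
    statetensor=torch.tensor([state],dtype=torch.float).to(device)
    with torch.no_grad():
        probs=agent.policynet(statetensor)
    action=int(torch.argmax(probs,dim=1).item())  # greedy action instead of sampling
    state,reward,terminated,truncated,_=evalenv.step(action)
    done=terminated or truncated
    evalreturn+=reward
print('Greedy evaluation return:',evalreturn)
evalenv.close()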
