Reinforcement Learning Case Reproduction (1) --- MountainCar with Q-learning

1 Setting up the environment

1.1 Using gym's built-in environment

python
import gym

# Create environment
env = gym.make("MountainCar-v0")

episodes = 10
for episode in range(episodes):
    obs, info = env.reset()  # gym 0.26.x: reset() returns (observation, info)
    done = False
    rewards = 0
    while not done:
        action = env.action_space.sample()  # pick a random action
        # gym 0.26.x: step() returns (obs, reward, terminated, truncated, info)
        obs, reward, terminated, truncated, info = env.step(action)
        done = terminated or truncated
        env.render()
        rewards += reward
    print(rewards)
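
With a purely random policy the car essentially never reaches the flag, so each episode ends at MountainCar-v0's 200-step time limit and the printed return is typically -200 (the environment gives a reward of -1 per step).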

1.2 Building the environment yourself (recommended)

Build the MountainCar environment by following the earlier article linked below; the training and test scripts in sections 2 and 3 assume this custom environment has been registered (a rough sketch follows).

Previous article: Reinforcement Learning in Practice (3) -- Building your own environment with gym (runs on gym 0.26.2) - CSDN blog
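
The full walkthrough is in the linked article. As a minimal sketch only (the module path and class name below are placeholders, not the article's actual code), a custom MountainCar-style environment is registered with gym so that the id "GridWorld-v0" used by the scripts below resolves to it:

python
import gym
from gym.envs.registration import register

# Placeholder module path and class name -- substitute the environment class you
# built by following the linked article.
register(
    id="GridWorld-v0",                                  # the id used by gym.make() in the scripts below
    entry_point="my_envs.mountain_car:MountainCarEnv",  # "package.module:ClassName"
)

env = gym.make("GridWorld-v0")
print(env.observation_space, env.action_space)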

2. Model training with Q-learning
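
The script below discretizes MountainCar's continuous two-dimensional observation (position, velocity) into a 20 x 20 grid and trains a tabular Q-learning agent with an epsilon-greedy exploration policy. The table update in the code (the line computing new_q) is the standard Q-learning rule Q(s, a) ← (1 − α)·Q(s, a) + α·(r + γ·max_a' Q(s', a')), with α = LEARNING_RATE and γ = DISCOUNT.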

python
import gym
import numpy as np

env = gym.make("GridWorld-v0")

# Q-Learning settings
LEARNING_RATE = 0.1  # learning rate (alpha)
DISCOUNT = 0.95  # reward discount factor (gamma)
EPISODES = 100  # number of training episodes (quite small for MountainCar; common tutorials use tens of thousands)

SHOW_EVERY = 1000  # render every SHOW_EVERY-th episode (with EPISODES = 100 only episode 0 is rendered)

# Exploration settings
epsilon = 1  # not a constant, going to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES//2
epsilon_decay_value = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)

DISCRETE_OS_SIZE = [20, 20]  # number of discrete buckets per observation dimension (position, velocity)
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE  # width of one bucket

print(discrete_os_win_size)


def get_discrete_state(state):
    # Map a continuous observation to integer bucket indices
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    # discrete_state = np.array(state - env.observation_space.low, dtype=float) / discrete_os_win_size
    return tuple(discrete_state.astype(np.int64))  # this tuple indexes the 3 Q values for the available actions in the q_table
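
# Worked example (assuming the standard MountainCar bounds low = [-1.2, -0.07],
# high = [0.6, 0.07]): each bucket is then 0.09 wide in position and 0.007 wide in
# velocity, so a state of [-0.5, 0.0] maps to ((-0.5 + 1.2)/0.09, (0.0 + 0.07)/0.007)
# -> (7, 10) after truncation to integers.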


# Q-table of shape (20, 20, 3): one entry per (position bucket, velocity bucket, action),
# initialised to random negative values since every step is rewarded with -1
q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))


for episode in range(EPISODES):
    state = env.reset()  # if your custom env follows the gym 0.26 API and returns (obs, info), use: state, _ = env.reset()
    discrete_state = get_discrete_state(state)

    if episode % SHOW_EVERY == 0:
        render = True
        print(episode)
    else:
        render = False

    done = False
    while not done:
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(q_table[discrete_state])
        else:
            # Get random action
            action = np.random.randint(0, env.action_space.n)

        new_state, reward, done, truncated, info = env.step(action)  # 5-value return per gym 0.26; 'done' here is the terminated flag
        new_discrete_state = get_discrete_state(new_state)

        # If simulation did not end yet after last step - update Q table
        if not done:
            # Maximum possible Q value in next step (for new state)
            max_future_q = np.max(q_table[new_discrete_state])
            # Current Q value (for current state and performed action)
            current_q = q_table[discrete_state + (action,)]
            # And here's our equation for a new Q value for current state and action
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
            # Update Q table with new Q value
            q_table[discrete_state + (action,)] = new_q

        # Simulation ended (for any reason) - if the goal position is achieved, update the Q value directly
        elif new_state[0] >= env.goal_position:
            # q_table[discrete_state + (action,)] = reward
            q_table[discrete_state + (action,)] = 0
            print("we made it on episode {}".format(episode))

        discrete_state = new_discrete_state

        if render:
            env.render()

    # Decaying is being done every episode if episode number is within decaying range
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon -= epsilon_decay_value

np.save("q_table.npy", arr=q_table)

env.close()

3. Model testing

python
import gym
import numpy as np


env = gym.make("GridWorld-v0")

# Settings (LEARNING_RATE and DISCOUNT are unused during testing, kept only for reference)
LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 10  # number of test episodes

DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

def get_discrete_state(state):
    discrete_state = (state - env.observation_space.low)/discrete_os_win_size
    return tuple(discrete_state.astype(np.int64))  # this tuple indexes the 3 Q values for the available actions in the q_table

q_table = np.load(file="q_table.npy")

for episode in range(EPISODES):
    state = env.reset()  # if your custom env follows the gym 0.26 API and returns (obs, info), use: state, _ = env.reset()
    discrete_state = get_discrete_state(state)

    rewards = 0
    done = False
    while not done:
        # Get action from Q table
        action = np.argmax(q_table[discrete_state])
        new_state, reward, done, truncated, info = env.step(action)  # 5-value return per gym 0.26; 'done' here is the terminated flag
        new_discrete_state = get_discrete_state(new_state)

        rewards += reward

        # If the episode ended at the goal position, report the success and the total reward
        if done and new_state[0] >= env.goal_position:
            print("we made it on episode {}, rewards {}".format(episode, rewards))

        discrete_state = new_discrete_state
        env.render()

env.close()
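
Note: with only EPISODES = 100 training episodes the learned greedy policy often cannot reach the flag yet, so the success message above may never print; tabular Q-learning on MountainCar typically needs tens of thousands of episodes, so increase EPISODES in the training script of section 2 if the test keeps failing.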