Reinforcement Learning Case Reproduction (1) --- MountainCar with Q-learning

1 Setting Up the Environment

1.1 Using gym's built-in environment

python
import gym

# Create the environment; in gym 0.26+ the render mode is set at construction time
env = gym.make("MountainCar-v0", render_mode="human")

episodes = 10
for ep in range(episodes):
    obs, info = env.reset()  # gym 0.26 reset() returns (observation, info)
    done = False
    rewards = 0
    while not done:
        action = env.action_space.sample()  # random action as a baseline
        obs, reward, terminated, truncated, info = env.step(action)
        done = terminated or truncated  # episode ends on goal or time limit
        rewards += reward
    print(rewards)

1.2 Building the environment yourself (recommended)

Build the MountainCar environment by following this earlier article:

Previous article: Reinforcement Learning in Practice (3): Building Your Own Environment with gym (runs on gym 0.26.2) - CSDN Blog
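The linked article covers the full build. As a quick reminder of the key step, here is a minimal sketch of how a custom environment is registered under the gym 0.26 API so that gym.make can find it; the module path grid_envs and the class name MountainCarEnv below are placeholders, not the actual names from that article:

python

# Minimal registration sketch (placeholder names, assuming gym 0.26).
from gym.envs.registration import register

register(
    id="GridWorld-v0",                       # the ID passed to gym.make() in the code below
    entry_point="grid_envs:MountainCarEnv",  # "module:ClassName" -- placeholder path
    max_episode_steps=200,                   # MountainCar's usual time limit
)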

2. Model Training with Q-learning

python
import gym
import numpy as np

env = gym.make("GridWorld-v0")  # the custom MountainCar environment from section 1.2

# Q-Learning settings
LEARNING_RATE = 0.1  # learning rate (alpha)
DISCOUNT = 0.95      # reward discount factor (gamma)
EPISODES = 100       # number of training episodes

SHOW_EVERY = 1000    # render every SHOW_EVERY episodes (with 100 episodes, only episode 0 renders)

# Exploration settings
epsilon = 1  # not a constant, going to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES//2
epsilon_decay_value = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)

DISCRETE_OS_SIZE = [20, 20]  # number of discrete buckets per observation dimension
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE  # bucket width

print(discrete_os_win_size)


def get_discrete_state(state):
    # Map a continuous observation to integer bucket indices
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    # This tuple indexes the Q-table row holding the 3 Q values for the available actions
    return tuple(discrete_state.astype(np.int64))


q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))  # 20x20x3 table; random init in [-2, 0] matches the -1 per-step reward


for episode in range(EPISODES):
    state, _ = env.reset()  # gym 0.26 reset() returns (observation, info)
    discrete_state = get_discrete_state(state)

    if episode % SHOW_EVERY == 0:
        render = True
        print(episode)
    else:
        render = False

    done = False
    while not done:
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(q_table[discrete_state])
        else:
            # Get random action
            action = np.random.randint(0, env.action_space.n)

        new_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated  # episode ends on goal or on the 200-step time limit
        new_discrete_state = get_discrete_state(new_state)

        # If the episode has not ended yet, update the Q-table
        if not done:
            # Maximum possible Q value in the next step (for the new state)
            max_future_q = np.max(q_table[new_discrete_state])
            # Current Q value (for the current state and performed action)
            current_q = q_table[discrete_state + (action,)]
            # The Q-learning update for the current state and action
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
            # Update the Q-table with the new Q value
            q_table[discrete_state + (action,)] = new_q

        # Episode ended -- if the goal position was reached, set the Q value directly
        elif new_state[0] >= env.goal_position:
            # q_table[discrete_state + (action,)] = reward
            q_table[discrete_state + (action,)] = 0
            print("we made it on episode {}".format(episode))

        discrete_state = new_discrete_state

        if render:
            env.render()

    # Decaying is being done every episode if episode number is within decaying range
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon -= epsilon_decay_value

np.save("q_table.npy", arr=q_table)

env.close()
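For reference, the update applied inside the training loop is the standard tabular Q-learning rule. With learning rate $\alpha$ = LEARNING_RATE and discount factor $\gamma$ = DISCOUNT, the line computing new_q implements:

$$Q(s,a) \leftarrow (1-\alpha)\,Q(s,a) + \alpha\bigl(r + \gamma \max_{a'} Q(s',a')\bigr)$$

Meanwhile, epsilon decays linearly from 1 to roughly 0 over the first half of training (episodes START_EPSILON_DECAYING through END_EPSILON_DECAYING), so the agent gradually shifts from random exploration to exploiting the learned Q values.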

3. Model Testing

python
import gym
import numpy as np


env = gym.make("GridWorld-v0")  # the same custom MountainCar environment as in training

# Evaluation settings (the learning rate and discount are not needed at test time)
EPISODES = 10  # number of evaluation episodes

DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

def get_discrete_state(state):
    # Same discretization as in training: continuous observation -> bucket indices into the Q-table
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    return tuple(discrete_state.astype(np.int64))

q_table = np.load(file="q_table.npy")

for episode in range(EPISODES):
    state, _ = env.reset()  # gym 0.26 reset() returns (observation, info)
    discrete_state = get_discrete_state(state)

    rewards = 0
    done = False
    while not done:
        # Get action from Q table
        action = np.argmax(q_table[discrete_state])
        new_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated
        new_discrete_state = get_discrete_state(new_state)

        rewards += reward

        # If the episode ended at the goal position, report success
        if done and new_state[0] >= env.goal_position:
            print("we made it on episode {}, rewards {}".format(episode, rewards))

        discrete_state = new_discrete_state
        env.render()

env.close()
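If you want a single number out of the evaluation rather than per-episode prints, a small variation of the loop above can collect the average return. This is a sketch assuming the same q_table.npy file and the same GridWorld-v0 registration as before:

python

import gym
import numpy as np

env = gym.make("GridWorld-v0")  # assumes the custom registration from section 1.2
q_table = np.load(file="q_table.npy")

DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

def get_discrete_state(state):
    # Continuous observation -> Q-table bucket indices, as in training
    return tuple(((state - env.observation_space.low) / discrete_os_win_size).astype(np.int64))

returns = []
for _ in range(10):
    state, _ = env.reset()
    done, total = False, 0.0
    while not done:
        action = np.argmax(q_table[get_discrete_state(state)])  # greedy policy
        state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated
        total += reward
    returns.append(total)

print("average return over {} episodes: {:.1f}".format(len(returns), np.mean(returns)))
env.close()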