What is a "softmax action" in reinforcement learning?
As the title asks.
That is, "softmax action selection". I understand the actual operation; I just don't know how to translate the term. If anyone has seen it rendered in the Chinese literature, please don't hesitate to share :) Many thanks!
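For reference while a standard Chinese rendering is found: softmax action selection (also called Boltzmann exploration) samples each action with probability proportional to exp(Q(a)/tau), where tau is a temperature parameter. Below is a minimal sketch in Python/NumPy; the function name and arguments are illustrative, not from any particular library.

import numpy as np

def softmax_action(q_values, tau=1.0):
    '''Sample an action index with probability proportional to exp(Q/tau).

    q_values: 1-D array-like of estimated action values.
    tau: temperature. Large tau -> near-uniform choice (more exploration);
         small tau -> near-greedy choice (more exploitation).
    '''
    prefs = np.asarray(q_values, dtype=float) / tau
    prefs -= prefs.max()   # shift by the max for numerical stability
    probs = np.exp(prefs)
    probs /= probs.sum()   # normalize into a probability distribution
    return np.random.choice(len(probs), p=probs)

For example, softmax_action([1.0, 2.0, 0.5], tau=0.5) strongly favors the second action but, unlike greedy selection, never rules the others out entirely.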
Reinforcement learning in Python: Q-learning, Example 4 (Example 2 rewritten)
Example 2 rewritten in an object-oriented style, with an environment added!
However, updating the environment uses a clear-screen command, so play() has some display problems; during learn() you can just about follow along :P
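One possible workaround (a sketch, not tested against this exact code): pick the clear-screen command by platform instead of hard-coding 'cls', which only works on Windows.

import os

def clear_screen():
    # 'cls' on Windows, 'clear' on Linux/macOS
    os.system('cls' if os.name == 'nt' else 'clear')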
0. Demo screenshot
1. Complete code
Compared with Example 1, the changes are:
Agent, five places: states, actions, rewards, get_valid_actions(), get_next_state()
Env, two places: __init__(), update()
import os
import time
import random
import pickle
import pathlib

import pandas as pd

'''
Four-cell maze:
---------------
| entry |      |
---------------
| trap  | exit |
---------------
'''

class Env(object):
    '''Environment'''

    def __init__(self):
        '''Initialize the 2x2 map as a flat character list'''
        self.env = list('--\n#-')

    def update(self, state, delay=0.1):
        '''Redraw the environment with the agent at `state`'''
        env = self.env[:]
        if state > 1:
            state += 1    # skip over the '\n' separating the two rows
        env[state] = 'o'  # mark the agent's position
        print('\r{}'.format(''.join(env)), end='')
        time.sleep(delay)
        os.system('cls')  # clear screen; Windows-only ('clear' on Linux/macOS)

class Agent(object):
    '''Agent'''

    def __init__(self, alpha=0.01, gamma=0.9):
        '''Initialize'''
        self.states = range(4)          # state set: 0, 1, 2, 3
        self.actions = list('udlr')     # action set: up, down, left, right
        self.rewards = [0, 0, -10, 10]  # reward set: exit (state 3) +10, trap (state 2) -10, else 0
        self.alpha = alpha
        self.gamma = gamma
        # use floats so the Q table has a float dtype from the start
        self.q_table = pd.DataFrame(data=[[0. for _ in self.actions] for _ in self.states],
                                    index=self.states, columns=self.actions)

    def save_policy(self):
        '''Save the Q table'''
        with open('q_table.pickle', 'wb') as f:
            # pickle the Q table using the highest protocol available
            pickle.dump(self.q_table, f, pickle.HIGHEST_PROTOCOL)

    def load_policy(self):
        '''Load the Q table'''
        with open('q_table.pickle', 'rb') as f:
            self.q_table = pickle.load(f)

    def choose_action(self, state, epsilon=0.8):
        '''Choose an action for the current state: random or greedy, per epsilon'''
        if (random.uniform(0, 1) > epsilon) or ((self.q_table.loc[state] == 0).all()):
            action = random.choice(self.get_valid_actions(state))  # explore
        else:
            # exploit (greedy); restrict idxmax() to legal actions so an
            # untouched zero entry of an illegal action can never be picked
            action = self.q_table.loc[state, self.get_valid_actions(state)].idxmax()
        return action

    def get_q_values(self, state):
        '''Get the Q values of `state` over its legal actions'''
        q_values = self.q_table.loc[state, self.get_valid_actions(state)]
        return q_values

    def update_q_value(self, state, action, next_state_reward, next_state_q_values):
        '''Update one Q value, following the Bellman (Q-learning) update'''
        self.q_table.loc[state, action] += self.alpha * (next_state_reward
            + self.gamma * next_state_q_values.max()
            - self.q_table.loc[state, action])

    def get_valid_actions(self, state):
        '''Get all legal actions in the current state'''
        valid_actions = set(self.actions)
        if state % 2 == 1:   # last column: cannot move right
            valid_actions -= set(['r'])
        if state % 2 == 0:   # first column: cannot move left
            valid_actions -= set(['l'])
        if state // 2 == 1:  # last row: cannot move down
            valid_actions -= set(['d'])
        if state // 2 == 0:  # first row: cannot move up
            valid_actions -= set(['u'])
        return list(valid_actions)

    def get_next_state(self, state, action):
        '''Get the state reached by executing `action` in `state`'''
        # u, d, l, r = -2, +2, -1, +1
        if state % 2 != 1 and action == 'r':     # any column but the last: right (+1)
            next_state = state + 1
        elif state % 2 != 0 and action == 'l':   # any column but the first: left (-1)
            next_state = state - 1
        elif state // 2 != 1 and action == 'd':  # any row but the last: down (+2)
            next_state = state + 2
        elif state // 2 != 0 and action == 'u':  # any row but the first: up (-2)
            next_state = state - 2
        else:
            next_state = state
        return next_state

    def learn(self, env=None, episode=1000, epsilon=0.8):
        '''The Q-learning algorithm'''
        print('Agent is learning...')
        for _ in range(episode):
            current_state = self.states[0]
            if env is not None:  # if an environment was supplied, redraw it
                env.update(current_state)
            while current_state != self.states[-1]:
                current_action = self.choose_action(current_state, epsilon)  # random or greedy, per epsilon
                next_state = self.get_next_state(current_state, current_action)
                next_state_reward = self.rewards[next_state]
                next_state_q_values = self.get_q_values(next_state)
                self.update_q_value(current_state, current_action, next_state_reward, next_state_q_values)
                current_state = next_state
                if env is not None:  # if an environment was supplied, redraw it
                    env.update(current_state)
        print('\nok')

    def play(self, env=None, delay=0.5):
        '''Play the game, using the learned policy'''
        assert env is not None, 'Env must be not None!'
        if pathlib.Path('q_table.pickle').exists():
            self.load_policy()
        else:
            print('I need to learn before playing this game.')
            self.learn(env, 13)
            self.save_policy()
        print('Agent is playing...')
        current_state = self.states[0]
        env.update(current_state, delay)
        while current_state != self.states[-1]:
            current_action = self.choose_action(current_state, 1.)  # epsilon=1.: purely greedy
            next_state = self.get_next_state(current_state, current_action)
            current_state = next_state
            env.update(current_state, delay)
        print('\nCongratulations, Agent got it!')

if __name__ == '__main__':
    env = Env()      # environment
    agent = Agent()  # agent
    agent.learn(env, episode=25, epsilon=0.6)  # learn first
    #agent.save_policy()  # save what was learned
    #agent.load_policy() # load what was learned
    #agent.play(env)     # then play
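For reference, update_q_value above is the standard Q-learning update; in the code's notation (alpha the learning rate, gamma the discount factor, r the reward observed on entering s'):

Q(s,a) \leftarrow Q(s,a) + \alpha\left[r + \gamma \max_{a'} Q(s',a') - Q(s,a)\right]

choose_action here is epsilon-greedy; swapping it for the softmax selection sketched near the top of this page would be a natural way to try out the very technique the question asks about.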