IntelLabs / coach

Reinforcement Learning Coach by Intel AI Lab enables easy experimentation with state of the art Reinforcement Learning algorithms
https://intellabs.github.io/coach/
Apache License 2.0

Error in Transition constructor #453

Closed: Boubside closed this issue 4 years ago

Boubside commented 4 years ago

Coach version: 1.0.0
Linux version: 18.04
Environment: Custom
Algorithm: Clipped PPO

I'm using a custom environment to train a basic_rl_graph with the Clipped PPO algorithm. During heatup, I get an error in the constructor of a Transition object. Here is the full traceback:

Traceback (most recent call last):
  File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_agent/local_training_worker.py", line 57, in <module>
    main()
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_agent/local_training_worker.py", line 51, in main
    start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_agent/local_training_worker.py", line 28, in start_graph
    graph_manager.improve()
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 539, in improve
    self.heatup(self.heatup_steps)
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 389, in heatup
    self.act(EnvironmentEpisodes(1))
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 449, in act
    result = self.top_level_manager.step(None)
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_coach/level_manager.py", line 239, in step
    done = acting_agent.observe(env_response)
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_coach/agents/agent.py", line 927, in observe
    game_over=filtered_env_response.game_over, info=filtered_env_response.info)
  File "/home/sebastien/.local/lib/python3.6/site-packages/rl_coach/core_types.py", line 214, in __init__
    if not next_state:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()

And here is the terminal output of print(self.last_env_response), called in the _update_state() method of the environment just before the error occurs:

{'_next_state': array([0.        , 1.        , 0.        , 0.        , 0.        ,
       0.        , 0.        , 0.        , 0.        , 0.        ,
       0.        , 0.        , 0.        , 0.        , 0.        ,
       0.        , 0.        , 0.        , 0.        , 0.        ,
       0.54580179, 0.90670458, 1.        , 0.29192448, 0.28100342,
       0.3942192 , 0.39484474, 0.40357855, 0.4234652 , 0.45741529,
       0.51113771, 0.5959566 , 0.7365733 , 0.98403327, 1.        ,
       1.        , 1.        , 1.        , 1.        , 0.99457982]), '_reward': -1.0, '_game_over': False, '_goal': None, 'info': {}}

As I understand it, next_state is supposed to be the state array, so the line if not next_state: in the Transition constructor seems problematic. Have I misunderstood the definition of states?
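For reference, the error itself is reproducible outside Coach: evaluating a NumPy array with more than one element in a boolean context (which is what if not next_state: does) always raises this ValueError. A minimal, environment-independent example:

import numpy as np

next_state = np.zeros(40)  # stand-in for the observation array printed above
if not next_state:         # raises ValueError: the truth value of an array with
    pass                   # more than one element is ambiguous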

Feel free to ask for more information. Thanks a lot.

Here are a few code snippets for further information. My preset file:

from rl_coach.agents.clipped_ppo_agent import ClippedPPOAgentParameters
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.schedules import LinearSchedule

from rl_agent.env.oarl import OARLEnvParameters

####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(1000000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentEpisodes(10)

#########
# Agent #
#########
agent_params = ClippedPPOAgentParameters()

agent_params.network_wrappers['main'].learning_rate = 0.0003
agent_params.network_wrappers['main'].input_embedders_parameters['observation'].activation_function = 'relu'
agent_params.network_wrappers['main'].batch_size = 64
agent_params.network_wrappers['main'].optimizer_epsilon = 1e-5
agent_params.network_wrappers['main'].adam_optimizer_beta2 = 0.999

agent_params.algorithm.clip_likelihood_ratio_using_epsilon = 0.2
agent_params.algorithm.clipping_decay_schedule = LinearSchedule(1.0, 0, 100000)
agent_params.algorithm.beta_entropy = 0.01  # also try 0.001
agent_params.algorithm.gae_lambda = 0.95
agent_params.algorithm.discount = 0.999
agent_params.algorithm.optimization_epochs = 10
agent_params.algorithm.estimate_state_value_using_gae = True
agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentEpisodes(30)
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentEpisodes(30)
agent_params.memory.max_size = (MemoryGranularity.Transitions, 10**5)

###############
# Environment #
###############

env_params = OARLEnvParameters()
# env_params.level = 'RoboMaker-oarl-v0'

vis_params = VisualizationParameters(render=False)

########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test = True
preset_validation_params.min_reward_threshold = 10000
preset_validation_params.max_episodes_to_achieve_reward = 100000

graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
                                    schedule_params=schedule_params, vis_params=vis_params,
                                    preset_validation_params=preset_validation_params)
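For completeness, the worker that drives this preset (local_training_worker.py in the traceback) does roughly the following. This is a simplified sketch reconstructed from the traceback; the preset module name and the TaskParameters setup are approximations, not the exact code:

from rl_coach.base_parameters import TaskParameters

# graph_manager is imported from the preset module shown above (name assumed)
from rl_agent.presets.oarl_clipped_ppo import graph_manager

def start_graph(graph_manager, task_parameters):
    # Build the graph, then run heatup / training / evaluation as scheduled
    graph_manager.create_graph(task_parameters)
    graph_manager.improve()

def main():
    task_parameters = TaskParameters(experiment_path='./experiment')  # path assumed
    start_graph(graph_manager=graph_manager, task_parameters=task_parameters)

if __name__ == '__main__':
    main()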

The custom environment (just the high-level code):

from __future__ import print_function
from pathlib import Path
from pkg_resources import Requirement, resource_filename
from typing import Union

from rl_coach import spaces
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.core_types import EnvResponse
from rl_coach.environments.environment import Environment, LevelSelection, EnvironmentParameters
from rl_coach.filters.filter import NoInputFilter, NoOutputFilter

import importlib
import json
import rl_agent.config as cfg
from rl_agent.env.obstacles.orchestrator import Orchestrator

from pkg_resources import resource_string

class OARLEnvParameters(EnvironmentParameters):

    def __init__(self):
        super().__init__()
        self.default_input_filter = NoInputFilter()
        self.default_output_filter = NoOutputFilter()

    @property
    def path(self):
        return 'rl_agent.env.oarl:OARLEnv'

class OARLEnv(Environment):

    def __init__(self, visualization_parameters: VisualizationParameters, seed: Union[None, int]=None, human_control: bool=False, custom_reward_threshold: Union[int, float]=None, **kwargs):
        super().__init__(LevelSelection(''), 0, 0, False, 0, VisualizationParameters())

        # Loading json config and loading modules
        state_module = 'rl_agent.env.state.' + cfg.state_config['module']
        reward_module = 'rl_agent.env.reward.' + cfg.reward_config['module']
        action_module = 'rl_agent.env.action.' + cfg.action_config['module']
        state_lib = importlib.import_module(state_module)
        reward_lib = importlib.import_module(reward_module)
        action_lib = importlib.import_module(action_module)
        self.state_obj = state_lib.State(cfg.state_config)
        self.reward_obj = reward_lib.Reward(cfg.reward_config)
        self.action_obj = action_lib.ActionSpace(cfg.action_config)
        self.orchestrator = Orchestrator(cfg.obstacle_config)

        # Creating action and state spaces
        self.state_space = self.state_obj.getStateSpace()
        self.action_space = self.action_obj.getActionSpace()

        # Spawning env
        self.orchestrator.spawn()
        self._restart_environment_episode()

    # Performs the action passed as argument and stores the env response in self.last_obs
    def _take_action(self, action):
        self.last_obs = self.action_obj.act(action)

    # Update the state (self.state, self.reward, self.done, self.goal, self.info) based on self.last_obs
    def _update_state(self):
        # Computing state reward and done signal
        if self.last_obs is not None:
            self.state = self.state_obj.computeState(self.last_obs)
            self.reward, self.done = self.reward_obj.computeReward(self.last_obs)
            self.goal = None
        else:
            # If observation is None (probably because this is a training worker) we sample the observation space instead
            self.state = self.state_obj.sample()
            self.reward = 0
            self.done = False
            self.goal = None

        # Updating state space and last_env_response
        self.state_space = self.state_obj.getStateSpace()
        print(self.last_env_response)

    # Restart the episode and set self.last_obs with the first obs
    def _restart_environment_episode(self, force_environment_reset=False):
        self.orchestrator.move()
        self.step(0)
        # self.orchestrator.reset_sim_time()

if __name__ == '__main__':
    w = OARLEnv(VisualizationParameters())
Boubside commented 4 years ago

Ok, I found my error: next_state is actually a dictionary with the state under the "observation" key. Sorry for the useless issue.
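For anyone hitting the same error: Coach expects the environment state to be a dictionary mapping observation space names to arrays, e.g. {'observation': <np.ndarray>}, rather than a bare array. A minimal sketch of the corresponding change in _update_state(), assuming state_obj.computeState() and state_obj.sample() return a flat NumPy array (the names follow the snippet above, not the Coach API):

def _update_state(self):
    # Compute the raw observation array, or sample one if no observation
    # is available yet (e.g. on a training-only worker)
    if self.last_obs is not None:
        observation = self.state_obj.computeState(self.last_obs)
        self.reward, self.done = self.reward_obj.computeReward(self.last_obs)
    else:
        observation = self.state_obj.sample()
        self.reward = 0
        self.done = False

    # Wrap the array in a dict keyed by the observation space name,
    # which is the form Coach's EnvResponse / Transition expect
    self.state = {'observation': observation}
    self.goal = None
    self.state_space = self.state_obj.getStateSpace()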