MIT-TESSE / goseek-challenge

Instructions for competing in the GOSEEK challenge at ICRA 2020

EOF error in ppo example ipython notebook #17

Closed joeljosephjin closed 4 years ago

joeljosephjin commented 4 years ago

In the goseek-ppo.ipynb notebook, when running model.learn(total_timesteps=total_timesteps, callback=save_checkpoint_callback) I get the following error:

---------------------------------------------------------------------------
EOFError                                  Traceback (most recent call last)
<ipython-input-16-a8bc6236ba26> in <module>
----> 1 model.learn(total_timesteps=total_timesteps, callback=save_checkpoint_callback)

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/ppo2/ppo2.py in learn(self, total_timesteps, callback, log_interval, tb_log_name, reset_num_timesteps)
    334                 callback.on_rollout_start()
    335                 # true_reward is the reward without discount
--> 336                 rollout = self.runner.run(callback)
    337                 # Unpack
    338                 obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = rollout

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/common/base_class.py in runner(self)
    792     def runner(self) -> AbstractEnvRunner:
    793         if self._runner is None:
--> 794             self._runner = self._make_runner()
    795         return self._runner
    796 

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/ppo2/ppo2.py in _make_runner(self)
     98     def _make_runner(self):
     99         return Runner(env=self.env, model=self, n_steps=self.n_steps,
--> 100                       gamma=self.gamma, lam=self.lam)
    101 
    102     def _get_pretrain_placeholders(self):

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/ppo2/ppo2.py in __init__(self, env, model, n_steps, gamma, lam)
    447         :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
    448         """
--> 449         super().__init__(env=env, model=model, n_steps=n_steps)
    450         self.lam = lam
    451         self.gamma = gamma

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/common/runners.py in __init__(self, env, model, n_steps)
     29         self.batch_ob_shape = (n_envs * n_steps,) + env.observation_space.shape
     30         self.obs = np.zeros((n_envs,) + env.observation_space.shape, dtype=env.observation_space.dtype.name)
---> 31         self.obs[:] = env.reset()
     32         self.n_steps = n_steps
     33         self.states = model.initial_state

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/common/vec_env/subproc_vec_env.py in reset(self)
    118         for remote in self.remotes:
    119             remote.send(('reset', None))
--> 120         obs = [remote.recv() for remote in self.remotes]
    121         return _flatten_obs(obs, self.observation_space)
    122 

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/common/vec_env/subproc_vec_env.py in <listcomp>(.0)
    118         for remote in self.remotes:
    119             remote.send(('reset', None))
--> 120         obs = [remote.recv() for remote in self.remotes]
    121         return _flatten_obs(obs, self.observation_space)
    122 

~/miniconda3/envs/goseek/lib/python3.7/multiprocessing/connection.py in recv(self)
    248         self._check_closed()
    249         self._check_readable()
--> 250         buf = self._recv_bytes()
    251         return _ForkingPickler.loads(buf.getbuffer())
    252 

~/miniconda3/envs/goseek/lib/python3.7/multiprocessing/connection.py in _recv_bytes(self, maxsize)
    405 
    406     def _recv_bytes(self, maxsize=None):
--> 407         buf = self._recv(4)
    408         size, = struct.unpack("!i", buf.getvalue())
    409         if maxsize is not None and size > maxsize:

~/miniconda3/envs/goseek/lib/python3.7/multiprocessing/connection.py in _recv(self, size, read)
    381             if n == 0:
    382                 if remaining == size:
--> 383                     raise EOFError
    384                 else:
    385                     raise OSError("got end of file during message")

EOFError: 
joeljosephjin commented 4 years ago

And on removing the callback, i.e. using the command model.learn(total_timesteps=total_timesteps) instead, this error comes up:

---------------------------------------------------------------------------
BrokenPipeError                           Traceback (most recent call last)
<ipython-input-32-c1c38248fd91> in <module>
----> 1 model.learn(total_timesteps=total_timesteps)#, callback=save_checkpoint_callback)

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/ppo2/ppo2.py in learn(self, total_timesteps, callback, log_interval, tb_log_name, reset_num_timesteps)
    334                 callback.on_rollout_start()
    335                 # true_reward is the reward without discount
--> 336                 rollout = self.runner.run(callback)
    337                 # Unpack
    338                 obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = rollout

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/common/base_class.py in runner(self)
    792     def runner(self) -> AbstractEnvRunner:
    793         if self._runner is None:
--> 794             self._runner = self._make_runner()
    795         return self._runner
    796 

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/ppo2/ppo2.py in _make_runner(self)
     98     def _make_runner(self):
     99         return Runner(env=self.env, model=self, n_steps=self.n_steps,
--> 100                       gamma=self.gamma, lam=self.lam)
    101 
    102     def _get_pretrain_placeholders(self):

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/ppo2/ppo2.py in __init__(self, env, model, n_steps, gamma, lam)
    447         :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
    448         """
--> 449         super().__init__(env=env, model=model, n_steps=n_steps)
    450         self.lam = lam
    451         self.gamma = gamma

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/common/runners.py in __init__(self, env, model, n_steps)
     29         self.batch_ob_shape = (n_envs * n_steps,) + env.observation_space.shape
     30         self.obs = np.zeros((n_envs,) + env.observation_space.shape, dtype=env.observation_space.dtype.name)
---> 31         self.obs[:] = env.reset()
     32         self.n_steps = n_steps
     33         self.states = model.initial_state

~/miniconda3/envs/goseek/lib/python3.7/site-packages/stable_baselines/common/vec_env/subproc_vec_env.py in reset(self)
    117     def reset(self):
    118         for remote in self.remotes:
--> 119             remote.send(('reset', None))
    120         obs = [remote.recv() for remote in self.remotes]
    121         return _flatten_obs(obs, self.observation_space)

~/miniconda3/envs/goseek/lib/python3.7/multiprocessing/connection.py in send(self, obj)
    204         self._check_closed()
    205         self._check_writable()
--> 206         self._send_bytes(_ForkingPickler.dumps(obj))
    207 
    208     def recv_bytes(self, maxlength=None):

~/miniconda3/envs/goseek/lib/python3.7/multiprocessing/connection.py in _send_bytes(self, buf)
    402             # Also note we want to avoid sending a 0-length buffer separately,
    403             # to avoid "broken pipe" errors if the other end closed the pipe.
--> 404             self._send(header + buf)
    405 
    406     def _recv_bytes(self, maxsize=None):

~/miniconda3/envs/goseek/lib/python3.7/multiprocessing/connection.py in _send(self, buf, write)
    366         remaining = len(buf)
    367         while True:
--> 368             n = write(self._handle, buf)
    369             remaining -= n
    370             if remaining == 0:

BrokenPipeError: [Errno 32] Broken pipe
joeljosephjin commented 4 years ago

This was solved when I ran export DISPLAY=:0 in the shell in which I started the Jupyter notebook.

If you look at the logs in the shell while Jupyter is running, you can see that the error is caused by the simulator failing to open.
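
For anyone who prefers to apply the workaround from inside the notebook, here is a minimal sketch (not from the original thread) that sets DISPLAY via os.environ before the environments are constructed. It assumes an X server is running on display 0 and that the cell runs before the vectorized environment is created:

```python
# Minimal sketch of the workaround, assuming an X server is available on display :0.
# Set DISPLAY before any simulator subprocesses are spawned (e.g. by SubprocVecEnv);
# child processes inherit the notebook kernel's environment variables.
import os

os.environ.setdefault("DISPLAY", ":0")
```

The shell equivalent, as noted above, is to run export DISPLAY=:0 before launching the Jupyter notebook.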

ZacRavichandran commented 4 years ago

Thanks for letting us know, that makes sense. When the display is not set, the simulator can't initialize properly.

Glad that solved the issue!