The Unity Machine Learning Agents Toolkit (ML-Agents) is an open-source project that enables games and simulations to serve as environments for training intelligent agents using deep reinforcement learning and imitation learning.
I'm creating a video game that uses artificial intelligence for the enemies. I had just started the training session and everything was running fine, but about 10 seconds later it gave me this error:
C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\torch_entities\utils.py:289: UserWarning: The use of x.T on tensors of dimension other than 2 to reverse their shape is deprecated and it will throw an error in a future release. Consider x.mT to transpose batches of matrices or x.permute(*torch.arange(x.ndim - 1, -1, -1)) to reverse the dimensions of a tensor. (Triggered internally at C:\actions-runner_work\pytorch\pytorch\builder\windows\pytorch\aten\src\ATen\native\TensorShape.cpp:3641.)
torch.nn.functional.one_hot(_act.T, action_size[i]).float()
C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\torch\onnx\symbolic_opset9.py:4662: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model.
warnings.warn(
[INFO] Exported results\=Jolleen2\Jolleen\Jolleen-1152.onnx
[INFO] Copied results\=Jolleen2\Jolleen\Jolleen-1152.onnx to results\=Jolleen2\Jolleen.onnx.
[INFO] Exported results\=Jolleen2\TrainingPlayer\TrainingPlayer-192.onnx
[INFO] Copied results\=Jolleen2\TrainingPlayer\TrainingPlayer-192.onnx to results\=Jolleen2\TrainingPlayer.onnx.
Traceback (most recent call last):
File "C:\Users\Utente\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\Utente\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\Utente\Fight For Life\MLvenv\Scripts\mlagents-learn.exe\__main__.py", line 7, in <module>
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\learn.py", line 264, in main
run_cli(parse_command_line())
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\learn.py", line 260, in run_cli
run_training(run_seed, options, num_areas)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\learn.py", line 136, in run_training
tc.start_learning(env_manager)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer_controller.py", line 175, in start_learning
n_steps = self.advance(env_manager)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer_controller.py", line 250, in advance
trainer.advance()
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\ghost\trainer.py", line 254, in advance
self.trainer.advance()
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer\rl_trainer.py", line 302, in advance
if self._update_policy():
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer\off_policy_trainer.py", line 211, in _update_policy
update_stats = self.optimizer.update(sampled_minibatch, n_sequences)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\sac\optimizer_torch.py", line 573, in update
q1_stream = self._condense_q_streams(q1_out, disc_actions)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\sac\optimizer_torch.py", line 467, in _condense_q_streams
branched_q = ModelUtils.break_into_branches(
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\torch_entities\utils.py", line 270, in break_into_branches
branched_logits = [
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\torch_entities\utils.py", line 271, in
concatenated_logits[:, action_idx[i] : action_idx[i + 1]]
IndexError: too many indices for tensor of dimension 1
I'm creating a video game that uses artificial intelligence for the enemies. I had just started the training session and everything was running fine, but about 10 seconds later it gave me this error:
C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\torch_entities\utils.py:289: UserWarning: The use of
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\learn.py", line 264, in main
run_cli(parse_command_line())
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\learn.py", line 260, in run_cli
run_training(run_seed, options, num_areas)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\learn.py", line 136, in run_training
tc.start_learning(env_manager)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer_controller.py", line 175, in start_learning
n_steps = self.advance(env_manager)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer_controller.py", line 250, in advance
trainer.advance()
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\ghost\trainer.py", line 254, in advance
self.trainer.advance()
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer\rl_trainer.py", line 302, in advance
if self._update_policy():
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\trainer\off_policy_trainer.py", line 211, in _update_policy
update_stats = self.optimizer.update(sampled_minibatch, n_sequences)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents_envs\timers.py", line 305, in wrapped
return func(*args, **kwargs)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\sac\optimizer_torch.py", line 573, in update
q1_stream = self._condense_q_streams(q1_out, disc_actions)
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\sac\optimizer_torch.py", line 467, in _condense_q_streams
branched_q = ModelUtils.break_into_branches(
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\torch_entities\utils.py", line 270, in break_into_branches
branched_logits = [
File "C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\mlagents\trainers\torch_entities\utils.py", line 271, in
concatenated_logits[:, action_idx[i] : action_idx[i + 1]]
IndexError: too many indices for tensor of dimension 1
x.T
on tensors of dimension other than 2 to reverse their shape is deprecated and it will throw an error in a future release. Consider x.mT
to transpose batches of matrices or x.permute(*torch.arange(x.ndim - 1, -1, -1))
to reverse the dimensions of a tensor. (Triggered internally at C:\actions-runner_work\pytorch\pytorch\builder\windows\pytorch\aten\src\ATen\native\TensorShape.cpp:3641.) torch.nn.functional.one_hot(_act.T, action_size[i]).float() C:\Users\Utente\Fight For Life\MLvenv\lib\site-packages\torch\onnx\symbolic_opset9.py:4662: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model. warnings.warn( [INFO] Exported results\=Jolleen2\Jolleen\Jolleen-1152.onnx [INFO] Copied results\=Jolleen2\Jolleen\Jolleen-1152.onnx to results\=Jolleen2\Jolleen.onnx. [INFO] Exported results\=Jolleen2\TrainingPlayer\TrainingPlayer-192.onnx [INFO] Copied results\=Jolleen2\TrainingPlayer\TrainingPlayer-192.onnx to results\=Jolleen2\TrainingPlayer.onnx. Traceback (most recent call last): File "C:\Users\Utente\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 197, in _run_module_as_main return _run_code(code, main_globals, None, File "C:\Users\Utente\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 87, in _run_code exec(code, run_globals) File "C:\Users\Utente\Fight For Life\MLvenv\Scripts\mlagents-learn.exe__main__.py", line 7, in