niiceMing / CMTA

(NeurIPS 2023) Contrastive Modules with Temporal Attention for Multi-Task Reinforcement Learning

hydra.errors.HydraException: Error calling 'mtrl.experiment.metaworld.Experiment' : Error calling 'metaworld.ML1' : Error loading module 'metaworld.ML1' #1

Closed (Logan-lxw closed this issue 11 months ago)

Logan-lxw commented 11 months ago

I ran into a problem, and I could not find a working answer in the MTRL repository.

[2023-12-06 00:24:22,834][default_logger][INFO] - {"setup": {"seed": 42, "setup": "metaworld", "algo": null, "base_path": "/home/lxv/RlCode/CMTA-main", "dir_name": "logs_fix", "save_dir": "${setup.base_path}/${setup.dir_name}/${setup.id}", "device": "cuda:0", "id": "None_seed_42", "description": "Sample Task", "tags": null, "git": {"commit_id": null, "has_uncommitted_changes": null, "issue_id": null}, "date": "2023-12-06 00:24:22", "slurm_id": "-1", "debug": {"should_enable": false}}, "experiment": {"name": "metaworld", "builder": {"_target_": "mtrl.experiment.${experiment.name}.Experiment"}, "init_steps": 3000, "num_train_steps": 1000000, "eval_freq": 3000, "num_eval_episodes": 10, "should_resume": true, "eval_only": false, "random_pos": false, "save": {"model": {"retain_last_n": 1}, "buffer": {"should_save": true, "size_per_chunk": 15000, "num_samples_to_save": -1}}, "save_dir": "${setup.save_dir}", "save_video": true, "envs_to_exclude_during_training": null}, "agent": {"name": "sac", "encoder_feature_dim": 64, "num_layers": 4, "num_filters": 32, "builder": {"_target_": "mtrl.agent.sac.Agent", "actor_cfg": "${agent.actor}", "critic_cfg": "${agent.critic}", "multitask_cfg": "${agent.multitask}", "alpha_optimizer_cfg": "${agent.optimizers.alpha}", "actor_optimizer_cfg": "${agent.optimizers.actor}", "critic_optimizer_cfg": "${agent.optimizers.critic}", "discount": 0.99, "init_temperature": 0.1, "actor_update_freq": 2, "critic_tau": 0.01, "critic_target_update_freq": 2, "encoder_tau": 0.05}, "actor": {"_target_": "mtrl.agent.components.actor.Actor", "num_layers": 3, "hidden_dim": 512, "log_std_bounds": [-20, 2], "encoder_cfg": "${agent.encoder}", "multitask_cfg": "${agent.multitask}"}, "critic": {"_target_": "mtrl.agent.components.critic.Critic", "hidden_dim": "${agent.actor.hidden_dim}", "num_layers": "${agent.actor.num_layers}", "encoder_cfg": "${agent.encoder}", "multitask_cfg": "${agent.multitask}"}, "encoder": {"type_to_select": "identity", "identity": {"type": "identity", "feature_dim": "${agent.encoder_feature_dim}"}, "feedforward": {"type": "feedforward", "hidden_dim": 64, "num_layers": 2, "feature_dim": "${agent.encoder_feature_dim}", "should_tie_encoders": true}, "lstm": {"type": "lstm", "hidden_dim": 64, "num_layers": 2, "feature_dim": "${agent.encoder_feature_dim}", "should_tie_encoders": true}, "film": {"type": "film", "hidden_dim": 64, "num_layers": 2, "feature_dim": "${agent.encoder_feature_dim}", "should_tie_encoders": true}, "moe": {"type": "moe", "encoder_cfg": {"type": "feedforward", "hidden_dim": 64, "num_layers": 2, "feature_dim": "${agent.encoder_feature_dim}", "should_tie_encoders": true}, "num_experts": 9, "task_id_to_encoder_id_cfg": {"mode": "cluster", "num_envs": "${env.num_envs}", "gate": {"embedding_dim": 64, "hidden_dim": 64, "num_layers": 2, "temperature": 1.0, "should_use_soft_attention": false, "topk": 2, "task_encoder_cfg": {"should_use_task_encoding": true, "should_detach_task_encoding": true}}, "attention": {"embedding_dim": 64, "hidden_dim": 64, "num_layers": 1, "temperature": 1.0, "should_use_soft_attention": true, "task_encoder_cfg": {"should_use_task_encoding": true, "should_detach_task_encoding": false}}, "rnn_attention": {"rnn_hidden_dim": 64, "hidden_dim": 64, "num_layers": 1, "temperature": 1.0, "should_use_soft_attention": true}, "cluster": {"env_name": "${env.name}", "task_description": "${env.description}", "ordered_task_list": "${env.ordered_task_list}", "mapping_cfg": "${agent.task_to_encoder_cluster}", "num_eval_episodes": 
"${experiment.num_eval_episodes}", "batch_size": "${replay_buffer.batch_size}"}, "identity": {"num_eval_episodes": "${experiment.num_eval_episodes}", "batch_size": "${replay_buffer.batch_size}"}, "ensemble": {"num_eval_episodes": "${experiment.num_eval_episodes}", "batch_size": "${replay_buffer.batch_size}"}}}, "factorized_moe": {"type": "fmoe", "encoder_cfg": "${agent.encoder.feedforward}", "num_factors": 2, "num_experts_per_factor": [5, 5]}, "pixel": {"type": "pixel", "feature_dim": "${agent.encoder_feature_dim}", "num_filters": "${agent.num_filters}", "num_layers": "${agent.num_layers}"}}, "transition_model": {"_target_": "mtrl.agent.components.transition_model.make_transition_model", "transition_cfg": {"type": "", "feature_dim": "${agent.encoder_feature_dim}", "layer_width": 512}, "multitask_cfg": "${agent.multitask}"}, "mask": {"num_tasks": "${env.num_envs}", "num_eval_episodes": "${experiment.num_eval_episodes}", "batch_size": "${replay_buffer.batch_size}"}, "multitask": {"num_envs": "${env.num_envs}", "should_use_disentangled_alpha": false, "should_use_reweighting": false, "should_use_task_encoder": false, "should_use_multi_head_policy": false, "should_use_disjoint_policy": false, "should_use_attention_multi_head_policy": false, "task_encoder_cfg": {"model_cfg": {"_target_": "mtrl.agent.components.task_encoder.TaskEncoder", "pretrained_embedding_cfg": {"should_use": false, "path_to_load_from": "/workspace/S/lansiming/mtrl/metadata/task_embedding/roberta_small/${env.name}.json", "ordered_task_list": "${env.ordered_task_list}"}, "num_embeddings": "${agent.multitask.num_envs}", "embedding_dim": 64, "hidden_dim": 64, "num_layers": 1, "output_dim": 64}, "optimizer_cfg": "${agent.optimizers.actor}", "losses_to_train": ["critic", "transition_reward", "decoder", "task_encoder"]}, "multi_head_policy_cfg": {"mask_cfg": "${agent.mask}"}, "actor_cfg": {"should_condition_model_on_task_info": false, "should_condition_encoder_on_task_info": true, "should_concatenate_task_info_with_encoder": true, "moe_cfg": {"mode": "soft_modularization", "num_experts": 4, "should_use": false}}, "critic_cfg": "${agent.multitask.actor_cfg}"}, "gradnorm": {"alpha": 1.0}, "task_to_encoder_cluster": {"mt10": {"cluster": {"action_close": ["close"], "action_default": ["insert", "pick and place", "press", "reach"], "action_open": ["open"], "action_push": ["push"], "object_default": ["button", "door", "peg", "revolving joint"], "object_drawer": ["drawer"], "object_goal": ["goal"], "object_puck": ["puck"], "object_window": ["window"]}}, "mt50": {"cluster": {"action_close": ["close"], "action_default": ["insert", "pick and place", "press", "reach"], "action_open": ["open"], "action_push": ["push"], "object_default": ["button", "door", "peg", "revolving joint"], "object_drawer": ["drawer"], "object_goal": ["goal"], "object_puck": ["puck"], "object_window": ["window"]}}}, "optimizers": {"actor": {"_target_": "torch.optim.Adam", "lr": 0.0003, "betas": [0.9, 0.999]}, "alpha": {"_target_": "torch.optim.Adam", "lr": 0.0003, "betas": [0.9, 0.999]}, "critic": {"_target_": "torch.optim.Adam", "lr": 0.0003, "betas": [0.9, 0.999]}, "decoder": {"_target_": "torch.optim.Adam", "lr": 0.0003, "betas": [0.9, 0.999], "weight_decay": 1e-07}, "encoder": {"_target_": "torch.optim.Adam", "lr": 0.0003, "betas": [0.9, 0.999]}}}, "env": {"name": "metaworld-ml1", "num_envs": 1, "benchmark": {"_target_": "metaworld.ML1", "env_name": "pick-place-v1"}, "should_perform_reward_normalization": true, "dummy": "${env.benchmark}", "description": null}, 
"replay_buffer": {"_target_": "mtrl.replay_buffer.ReplayBuffer", "env_obs_shape": null, "action_shape": null, "capacity": 5000000, "batch_size": 128}, "logger": {"_target_": "mtrl.logger.Logger", "logger_dir": "${setup.save_dir}", "use_tb": false}, "metrics": {"train": [["episode", "E", "int", "average"], ["step", "S", "int", "average"], ["duration", "D", "time", "average"], ["hours", "T", "time", "average"], ["episode_reward", "R", "float", "average"], ["success", "Su", "float", "average"], ["batch_reward", "BR", "float", "average"], ["actor_loss", "ALOSS", "float", "average"], ["critic_loss", "CLOSS", "float", "average"], ["ae_loss", "RLOSS", "float", "average"], ["ae_transition_loss", null, "float", "average"], ["reward_loss", null, "float", "average"], ["actor_target_entropy", null, "float", "average"], ["actor_entropy", null, "float", "average"], ["alpha_loss", null, "float", "average"], ["alpha_value_", null, "float", "average"], ["contrastive_loss", "MLOSS", "float", "average"], ["max_rat", "MR", "float", "average"], ["env_index", "ENV", "str", "constant"], ["episode_reward_env_index_", "R_", "float", "average"], ["success_env_index_", "Su_", "float", "average"], ["env_index_", "ENV_", "str", "constant"], ["batch_reward_agent_index_", null, "float", "average"], ["critic_loss_agent_index_", "AGENT_", "float", "average"], ["actor_distilled_agent_loss_agent_index_", null, "float", "average"], ["actor_loss_agent_index_", null, "float", "average"], ["actor_target_entropy_agent_index_", null, "float", "average"], ["actor_entropy_agent_index_", null, "float", "average"], ["alpha_loss_agent_index_", null, "float", "average"], ["alpha_value_agent_index_", null, "float", "average"], ["ae_loss_agent_index_", null, "float", "average"], ["info_nce_loss", "IN", "float", "average"], ["accuracy", null, "float", "average"]], "eval": [["episode", "E", "int", "average"], ["step", "S", "int", "average"], ["episode_reward", "R", "float", "average"], ["env_index", "ENV", "str", "constant"], ["success", "Su", "float", "average"], ["episode_reward_env_index_", "R_", "float", "average"], ["success_env_index_", "Su_", "float", "average"], ["env_index_", "ENV_", "str", "constant"], ["batch_reward_agent_index_", "AGENT_", "float", "average"]]}, "logbook": {"_target_": "ml_logger.logbook.make_config", "write_to_console": false, "logger_dir": "${setup.save_dir}", "create_multiple_log_files": false}, "status": "RUNNING", "logbook_id": "0", "logbook_timestamp": "12:24:22AM CST Dec 06, 2023", "logbook_type": "metadata"}
Starting Experiment at Wed Dec  6 00:24:22 2023
torch version = 1.7.1+cu110
Traceback (most recent call last):
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/site-packages/hydra/_internal/utils.py", line 529, in _locate
    module = import_module(mod)
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/importlib/__init__.py", line 126, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "<frozen importlib._bootstrap>", line 991, in _gcd_import
  File "<frozen importlib._bootstrap>", line 930, in _sanity_check
ValueError: Empty module name

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/site-packages/hydra/utils.py", line 61, in call
    type_or_callable = _locate(cls)
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/site-packages/hydra/_internal/utils.py", line 532, in _locate
    raise ImportError(f"Error loading module '{path}'") from e
ImportError: Error loading module 'metaworld.ML1'

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/site-packages/hydra/utils.py", line 63, in call
    return _instantiate_class(type_or_callable, config, *args, **kwargs)
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/site-packages/hydra/_internal/utils.py", line 500, in _instantiate_class
    return clazz(*args, **final_kwargs)
  File "/home/lxv/RlCode/CMTA-main/mtrl/experiment/metaworld.py", line 20, in __init__
    super().__init__(config, experiment_id)
  File "/home/lxv/RlCode/CMTA-main/mtrl/experiment/multitask.py", line 27, in __init__
    super().__init__(config, experiment_id)
  File "/home/lxv/RlCode/CMTA-main/mtrl/experiment/experiment.py", line 33, in __init__
    self.envs, self.env_metadata = self.build_envs()
  File "/home/lxv/RlCode/CMTA-main/mtrl/experiment/metaworld.py", line 44, in build_envs
    benchmark = hydra.utils.instantiate(self.config.env.benchmark)
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/site-packages/hydra/utils.py", line 70, in call
    raise HydraException(f"Error calling '{cls}' : {e}") from e
hydra.errors.HydraException: Error calling 'metaworld.ML1' : Error loading module 'metaworld.ML1'

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/lxv/RlCode/CMTA-main/main.py", line 14, in launch
    return run(config)
  File "/home/lxv/RlCode/CMTA-main/mtrl/app/run.py", line 37, in run
    experiment_utils.prepare_and_run(config=config)
  File "/home/lxv/RlCode/CMTA-main/mtrl/experiment/utils.py", line 24, in prepare_and_run
    config.experiment.builder, config
  File "/home/lxv/anaconda3/envs/MTRL/lib/python3.6/site-packages/hydra/utils.py", line 70, in call
    raise HydraException(f"Error calling '{cls}' : {e}") from e
hydra.errors.HydraException: Error calling 'mtrl.experiment.metaworld.Experiment' : Error calling 'metaworld.ML1' : Error loading module 'metaworld.ML1'

Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.

I hit the same problem with MTRL itself, so how did you fix this?
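For reference: the innermost "ValueError: Empty module name" comes from Hydra's _locate falling all the way back to importing an empty string, which only happens after "import metaworld" itself has already failed, so the Hydra wrapper is masking the real import error. A minimal diagnostic sketch (ours, not from the original report) that surfaces the underlying failure directly:

# Run in the same conda env (MTRL) that launches the experiment.
import metaworld                 # raises here if Metaworld is missing or its
                                 # build dependencies (e.g. mujoco_py) are broken
print(metaworld.__file__)        # shows which installation Python picked up
print(metaworld.ML1)             # the class the Hydra target 'metaworld.ML1'
                                 # must resolve to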

niiceMing commented 11 months ago

You can find the solution in https://github.com/Farama-Foundation/Metaworld (see its installation instructions).
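In other words, the Hydra error is a symptom of a missing or broken Metaworld installation rather than a bug in CMTA. Assuming a working MuJoCo setup is already in place, installing Metaworld from that repository is the usual fix, e.g.:

pip install git+https://github.com/Farama-Foundation/Metaworld.git

Note that this installs the current upstream package; CMTA/MTRL may expect the specific older Metaworld revision referenced in the MTRL installation instructions (not reproduced here), where environment names like pick-place-v1 still exist. Once "import metaworld" succeeds, Hydra should be able to instantiate metaworld.ML1.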