facebookresearch / fairseq

Facebook AI Research Sequence-to-Sequence Toolkit written in Python.
MIT License
30.47k stars 6.41k forks source link

ValidationError: Value '1.0' could not be converted to Integer #2935

Open VikasRajashekar opened 3 years ago

VikasRajashekar commented 3 years ago

How can I resolve the following error?

I am just running the example code from your tutorial/README, and I have set everything up according to the instructions you provided.

---------------------------------------------------------------------------
ValidationError                           Traceback (most recent call last)
~/anaconda3/envs/fair2/lib/python3.7/site-packages/hydra/_internal/config_loader_impl.py in _apply_overrides_to_config(overrides, cfg)
    512                     try:
--> 513                         OmegaConf.update(cfg, key, value, merge=True)
    514                     except (ConfigAttributeError, ConfigKeyError) as ex:

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/omegaconf.py in update(cfg, key, value, merge)
    607                 assert isinstance(node, BaseContainer)
--> 608                 node.merge_with(value)
    609                 return

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/basecontainer.py in merge_with(self, *others)
    330         except Exception as e:
--> 331             self._format_and_raise(key=None, value=None, cause=e)
    332 

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/base.py in _format_and_raise(self, key, value, cause, type_override)
    100             cause=cause,
--> 101             type_override=type_override,
    102         )

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/_utils.py in format_and_raise(node, key, value, msg, cause, type_override)
    628             ex.__dict__ = copy.deepcopy(cause.__dict__)
--> 629         _raise(ex, cause)
    630 

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/_utils.py in _raise(ex, cause)
    609         ex.__cause__ = None
--> 610     raise ex  # set end OC_CAUSE=1 for full backtrace
    611 

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/basecontainer.py in merge_with(self, *others)
    328         try:
--> 329             self._merge_with(*others)
    330         except Exception as e:

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/basecontainer.py in _merge_with(self, *others)
    367                         for item in other:
--> 368                             self.append(item)
    369 

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/listconfig.py in append(self, item)
    227         except Exception as e:
--> 228             self._format_and_raise(key=index, value=item, cause=e)
    229             assert False

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/base.py in _format_and_raise(self, key, value, cause, type_override)
    100             cause=cause,
--> 101             type_override=type_override,
    102         )

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/_utils.py in format_and_raise(node, key, value, msg, cause, type_override)
    693 
--> 694     _raise(ex, cause)
    695 

~/anaconda3/envs/fair2/lib/python3.7/site-packages/omegaconf/_utils.py in _raise(ex, cause)
    609         ex.__cause__ = None
--> 610     raise ex  # set end OC_CAUSE=1 for full backtrace
    611 

ValidationError: Value '1.0' could not be converted to Integer
    full_key: optimization.update_freq[0]
    reference_type=List[int]
    object_type=list

The above exception was the direct cause of the following exception:

ConfigCompositionException                Traceback (most recent call last)
<ipython-input-5-7b307de7abc6> in <module>
      7 # Note: WMT'19 models use fastBPE instead of subword_nmt, see instructions below
      8 en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de',
----> 9                        tokenizer='moses', bpe='subword_nmt')
     10 en2de.eval()  # disable dropout
     11 

~/anaconda3/envs/fair2/lib/python3.7/site-packages/torch/hub.py in load(repo_or_dir, model, *args, **kwargs)
    368         repo_or_dir = _get_cache_or_reload(repo_or_dir, force_reload, verbose)
    369 
--> 370     model = _load_local(repo_or_dir, model, *args, **kwargs)
    371     return model
    372 

~/anaconda3/envs/fair2/lib/python3.7/site-packages/torch/hub.py in _load_local(hubconf_dir, model, *args, **kwargs)
    397 
    398     entry = _load_entry_from_hubconf(hub_module, model)
--> 399     model = entry(*args, **kwargs)
    400 
    401     sys.path.remove(hubconf_dir)

~/.cache/torch/hub/pytorch_fairseq_master/fairseq/models/fairseq_model.py in from_pretrained(cls, model_name_or_path, checkpoint_file, data_name_or_path, **kwargs)
    275             data_name_or_path,
    276             archive_map=cls.hub_models(),
--> 277             **kwargs,
    278         )
    279         logger.info(x["args"])

~/.cache/torch/hub/pytorch_fairseq_master/fairseq/hub_utils.py in from_pretrained(model_name_or_path, checkpoint_file, data_name_or_path, archive_map, **kwargs)
     71     models, args, task = checkpoint_utils.load_model_ensemble_and_task(
     72         [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
---> 73         arg_overrides=kwargs,
     74     )
     75 

~/.cache/torch/hub/pytorch_fairseq_master/fairseq/checkpoint_utils.py in load_model_ensemble_and_task(filenames, arg_overrides, task, strict, suffix, num_shards, state)
    289                 raise IOError("Model file not found: {}".format(filename))
    290             if state is None:
--> 291                 state = load_checkpoint_to_cpu(filename, arg_overrides)
    292             if "args" in state and state["args"] is not None:
    293                 cfg = convert_namespace_to_omegaconf(state["args"])

~/.cache/torch/hub/pytorch_fairseq_master/fairseq/checkpoint_utils.py in load_checkpoint_to_cpu(path, arg_overrides)
    235         overwrite_args_by_name(state["cfg"], arg_overrides)
    236 
--> 237     state = _upgrade_state_dict(state)
    238     return state
    239 

~/.cache/torch/hub/pytorch_fairseq_master/fairseq/checkpoint_utils.py in _upgrade_state_dict(state)
    466             state["args"].post_process = state["args"].remove_bpe
    467 
--> 468         state["cfg"] = convert_namespace_to_omegaconf(state["args"])
    469 
    470     if "cfg" in state and state["cfg"] is not None:

~/.cache/torch/hub/pytorch_fairseq_master/fairseq/dataclass/utils.py in convert_namespace_to_omegaconf(args)
    343     with initialize(config_path=config_path):
    344         try:
--> 345             composed_cfg = compose("config", overrides=overrides, strict=False)
    346         except:
    347             logger.error("Error when composing. Overrides: " + str(overrides))

~/anaconda3/envs/fair2/lib/python3.7/site-packages/hydra/experimental/compose.py in compose(config_name, overrides, strict, return_hydra_config)
     35         strict=strict,
     36         from_shell=False,
---> 37         with_log_configuration=False,
     38     )
     39     assert isinstance(cfg, DictConfig)

~/anaconda3/envs/fair2/lib/python3.7/site-packages/hydra/_internal/hydra.py in compose_config(self, config_name, overrides, run_mode, strict, with_log_configuration, from_shell)
    510             strict=strict,
    511             run_mode=run_mode,
--> 512             from_shell=from_shell,
    513         )
    514         with open_dict(cfg):

~/anaconda3/envs/fair2/lib/python3.7/site-packages/hydra/_internal/config_loader_impl.py in load_configuration(self, config_name, overrides, run_mode, strict, from_shell)
    154                 run_mode=run_mode,
    155                 strict=strict,
--> 156                 from_shell=from_shell,
    157             )
    158         except OmegaConfBaseException as e:

~/anaconda3/envs/fair2/lib/python3.7/site-packages/hydra/_internal/config_loader_impl.py in _load_configuration(self, config_name, overrides, run_mode, strict, from_shell)
    275 
    276         # Apply command line overrides after enabling strict flag
--> 277         ConfigLoaderImpl._apply_overrides_to_config(config_overrides, cfg)
    278 
    279         app_overrides = []

~/anaconda3/envs/fair2/lib/python3.7/site-packages/hydra/_internal/config_loader_impl.py in _apply_overrides_to_config(overrides, cfg)
    520                 raise ConfigCompositionException(
    521                     f"Error merging override {override.input_line}"
--> 522                 ) from ex
    523 
    524     @staticmethod

ConfigCompositionException: Error merging override optimization.update_freq=[1.0]
sophgit commented 3 years ago

Hi, I am having the same error and would also appreciate some help! Thanks :)

myleott commented 3 years ago

Which tutorial/README did you follow? Can you share the exact command you ran, and also mention what version of fairseq you're using (0.10.1, master, ...)?

ZhaoQianfeng commented 3 years ago

I got the same error when I followed the fairseq documentation tutorial. I downloaded wmt16.en-de.joined-dict.transformer and followed the instructions, but it failed with the same error.

ZhaoQianfeng commented 3 years ago

the command I use is:

>MODEL_DIR=wmt16.en-de.joined-dict.transformer
>fairseq-interactive --path $MODEL_DIR/model.pt $MODEL_DIR --beam 5 --source-lang en --target-lang de --tokenizer moses --bpe subword_nmt --bpe-codes $MODEL_DIR/bpecodes

The fairseq version is 1.0.0a0+f13f299

ZhaoQianfeng commented 3 years ago

the command I use is:

>MODEL_DIR=wmt16.en-de.joined-dict.transformer
>fairseq-interactive --path $MODEL_DIR/model.pt $MODEL_DIR --beam 5 --source-lang en --target-lang de --tokenizer moses --bpe subword_nmt --bpe-codes $MODEL_DIR/bpecodes

The fairseq version is 1.0.0a0+f13f299

after I uninstalled master(1.0.0) version and installed v0.10.0, it works fine and no error

yuchenlin commented 3 years ago

I am having the same error when using the master version and running

torch.hub.load('pytorch/fairseq', 'transformer.wmt14.en-fr',
                                    checkpoint_file='model.pt',
                                    tokenizer='moses', bpe='fastbpe')

but it works with other models such as transformer.wmt19.en-de

mcao516 commented 3 years ago

I manually set update_freq to [1] in fairseq/fairseq/checkpoint_utils.py and it works:

def load_checkpoint_to_cpu(path, arg_overrides=None):
    """Load a checkpoint onto the CPU, upgrading old formats for compatibility.

    Workaround included: ``update_freq`` is forced to ``[1]`` before the
    upgrade step, because some released checkpoints store it as a float and
    that breaks the typed-config validation (``List[int]``).
    """
    local_path = PathManager.get_local_path(path)
    with open(local_path, "rb") as f:
        state = torch.load(f, map_location=torch.device("cpu"))

    # Apply user-supplied overrides to whichever config representation the
    # checkpoint carries (legacy argparse Namespace and/or omegaconf cfg).
    if arg_overrides is not None:
        if "args" in state and state["args"] is not None:
            ns = state["args"]
            for name, value in arg_overrides.items():
                setattr(ns, name, value)
        if "cfg" in state and state["cfg"] is not None:
            overwrite_args_by_name(state["cfg"], arg_overrides)

    # manually set to [1]
    state["args"].update_freq = [1]
    return _upgrade_state_dict(state)
stale[bot] commented 3 years ago

This issue has been automatically marked as stale. If this issue is still affecting you, please leave any comment (for example, "bump"), and we'll keep it open. We are sorry that we haven't been able to prioritize it yet. If you have any new additional information, please include it with your comment!