huggingface / transformers

🤗 Transformers: State-of-the-art Machine Learning for PyTorch, TensorFlow, and JAX.
https://huggingface.co/transformers
Apache License 2.0

git.exc.InvalidGitRepositoryError when running finetune_rag.py #13409

Closed · gianlucabusatta closed this issue 3 years ago

gianlucabusatta commented 3 years ago

## Environment info

- `transformers` version: 4.10.0
- Platform: Linux-5.4.104+-x86_64-with-Ubuntu-18.04-bionic
- Python version: 3.7.11
- PyTorch version (GPU?): 1.9.0+cu102 (True)
- Tensorflow version (GPU?): 2.6.0 (True)
- Flax version (CPU?/GPU?/TPU?): not installed (NA)
- Jax version: not installed
- JaxLib version: not installed
- Using GPU in script?: Yes
- Using distributed or parallel set-up in script?: No

I'm on Colab.

## Who can help

research_projects/rag: @patrickvonplaten, @lhoestq

## To reproduce

Steps to reproduce the behavior:

  1. !python transformers/examples/research_projects/rag/consolidate_rag_checkpoint.py \
    --model_type rag_token \
    --generator_name_or_path facebook/mbart-large-cc25 \
    --question_encoder_name_or_path voidful/dpr-question_encoder-bert-base-multilingual \
    --dest /content/checkpoint
  2. !python transformers/examples/research_projects/rag/finetune_rag.py \
    --data_dir /content/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data \
    --output_dir /content/finetune_output \
    --model_name_or_path /content/checkpoint \
    --model_type rag_token \
    --fp16 \
    --use_dummy_dataset True

    Error:

    
    loading configuration file /content/checkpoint/config.json
    Model config RagConfig {
    "architectures": [
    "RagTokenForGeneration"
    ],
    "dataset": "wiki_dpr",
    "dataset_split": "train",
    "do_deduplication": true,
    "do_marginalize": false,
    "doc_sep": " // ",
    "exclude_bos_score": false,
    "forced_eos_token_id": 2,
    "generator": {
    "_name_or_path": "",
    "_num_labels": 3,
    "activation_dropout": 0.0,
    "activation_function": "gelu",
    "add_bias_logits": false,
    "add_cross_attention": false,
    "add_final_layer_norm": true,
    "architectures": [
      "MBartForConditionalGeneration"
    ],
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "classif_dropout": 0.0,
    "classifier_dropout": 0.0,
    "d_model": 1024,
    "decoder_attention_heads": 16,
    "decoder_ffn_dim": 4096,
    "decoder_layerdrop": 0.0,
    "decoder_layers": 12,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.1,
    "early_stopping": false,
    "encoder_attention_heads": 16,
    "encoder_ffn_dim": 4096,
    "encoder_layerdrop": 0.0,
    "encoder_layers": 12,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": 2,
    "gradient_checkpointing": false,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1",
      "2": "LABEL_2"
    },
    "init_std": 0.02,
    "is_decoder": false,
    "is_encoder_decoder": true,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1,
      "LABEL_2": 2
    },
    "length_penalty": 1.0,
    "max_length": 1024,
    "max_position_embeddings": 1024,
    "min_length": 0,
    "model_type": "mbart",
    "no_repeat_ngram_size": 0,
    "normalize_before": true,
    "normalize_embedding": true,
    "num_beam_groups": 1,
    "num_beams": 5,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_past": true,
    "output_scores": false,
    "pad_token_id": 1,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "scale_embedding": true,
    "sep_token_id": null,
    "static_position_embeddings": false,
    "task_specific_params": {
      "translation_en_to_ro": {
        "decoder_start_token_id": 250020
      }
    },
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.10.0",
    "use_bfloat16": false,
    "use_cache": true,
    "vocab_size": 250027
    },
    "index_name": "exact",
    "index_path": null,
    "is_encoder_decoder": true,
    "label_smoothing": 0.0,
    "max_combined_length": 300,
    "model_type": "rag",
    "n_docs": 5,
    "output_retrieved": false,
    "passages_path": null,
    "question_encoder": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": [
      "DPRQuestionEncoder"
    ],
    "attention_probs_dropout_prob": 0.1,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "gradient_checkpointing": false,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "language": "multilingual",
    "layer_norm_eps": 1e-12,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 512,
    "min_length": 0,
    "model_type": "dpr",
    "name": "DPRQuestionEncoder",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 0,
    "position_embedding_type": "absolute",
    "prefix": null,
    "problem_type": null,
    "projection_dim": 0,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "revision": null,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.10.0",
    "type_vocab_size": 2,
    "use_bfloat16": false,
    "vocab_size": 119547
    },
    "reduce_loss": false,
    "retrieval_batch_size": 8,
    "retrieval_vector_size": 768,
    "title_sep": " / ",
    "torch_dtype": "float32",
    "transformers_version": null,
    "use_cache": true,
    "use_dummy_dataset": false,
    "vocab_size": null
    }

Could not locate the tokenizer configuration file, will try to use the model config instead.
Didn't find file /content/checkpoint/question_encoder_tokenizer/added_tokens.json. We won't load it.
loading file /content/checkpoint/question_encoder_tokenizer/vocab.txt
loading file /content/checkpoint/question_encoder_tokenizer/tokenizer.json
loading file None
loading file /content/checkpoint/question_encoder_tokenizer/special_tokens_map.json
loading file /content/checkpoint/question_encoder_tokenizer/tokenizer_config.json
Could not locate the tokenizer configuration file, will try to use the model config instead.
Didn't find file /content/checkpoint/generator_tokenizer/sentencepiece.bpe.model. We won't load it.
Didn't find file /content/checkpoint/generator_tokenizer/added_tokens.json. We won't load it.
loading file None
loading file /content/checkpoint/generator_tokenizer/tokenizer.json
loading file None
loading file /content/checkpoint/generator_tokenizer/special_tokens_map.json
loading file /content/checkpoint/generator_tokenizer/tokenizer_config.json
Assigning ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] to the additional_special_tokens key of the tokenizer
Loading passages from wiki_dpr
WARNING:datasets.builder:Using custom data configuration dummy.psgs_w100.nq.no_index-dummy=True,with_index=False
WARNING:datasets.builder:Reusing dataset wiki_dpr (/root/.cache/huggingface/datasets/wiki_dpr/dummy.psgs_w100.nq.no_index-dummy=True,with_index=False/0.0.0/91b145e64f5bc8b55a7b3e9f730786ad6eb19cd5bc020e2e02cdf7d0cb9db9c1)
loading weights file /content/checkpoint/pytorch_model.bin
All model checkpoint weights were used when initializing RagTokenForGeneration.

All the weights of RagTokenForGeneration were initialized from the model checkpoint at /content/checkpoint.
If your task is similar to the task the model of the checkpoint was trained on, you can already use RagTokenForGeneration for predictions without further training.
loading configuration file /content/checkpoint/config.json
Model config RagConfig { ... }

Could not locate the tokenizer configuration file, will try to use the model config instead.
Didn't find file /content/checkpoint/question_encoder_tokenizer/added_tokens.json. We won't load it.
loading file /content/checkpoint/question_encoder_tokenizer/vocab.txt
loading file /content/checkpoint/question_encoder_tokenizer/tokenizer.json
loading file None
loading file /content/checkpoint/question_encoder_tokenizer/special_tokens_map.json
loading file /content/checkpoint/question_encoder_tokenizer/tokenizer_config.json
Could not locate the tokenizer configuration file, will try to use the model config instead.
Didn't find file /content/checkpoint/generator_tokenizer/sentencepiece.bpe.model. We won't load it.
Didn't find file /content/checkpoint/generator_tokenizer/added_tokens.json. We won't load it.
loading file None
loading file /content/checkpoint/generator_tokenizer/tokenizer.json
loading file None
loading file /content/checkpoint/generator_tokenizer/special_tokens_map.json
loading file /content/checkpoint/generator_tokenizer/tokenizer_config.json
Assigning ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] to the additional_special_tokens key of the tokenizer

Traceback (most recent call last):
  File "transformers/examples/research_projects/rag/finetune_rag.py", line 617, in <module>
    main(args)
  File "transformers/examples/research_projects/rag/finetune_rag.py", line 554, in main
    model: GenerativeQAModule = GenerativeQAModule(args)
  File "transformers/examples/research_projects/rag/finetune_rag.py", line 157, in __init__
    save_git_info(self.hparams.output_dir)
  File "/content/transformers/examples/research_projects/rag/utils_rag.py", line 145, in save_git_info
    repo_infos = get_git_info()
  File "/content/transformers/examples/research_projects/rag/utils_rag.py", line 160, in get_git_info
    repo = git.Repo(search_parent_directories=True)
  File "/usr/local/lib/python3.7/dist-packages/git/repo/base.py", line 220, in __init__
    self.working_dir = self._working_tree_dir or self.common_dir  # type: Optional[PathLike]
  File "/usr/local/lib/python3.7/dist-packages/git/repo/base.py", line 303, in common_dir
    raise InvalidGitRepositoryError()
git.exc.InvalidGitRepositoryError
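
For context, the exception is raised by GitPython rather than by the model loading itself. Below is a minimal sketch of the check that fails (based on the `git.Repo(search_parent_directories=True)` call in the traceback), run from a directory that is not inside a git checkout:

```python
# Minimal sketch of the failing call in utils_rag.get_git_info():
# git.Repo(search_parent_directories=True) walks upward from the current working
# directory looking for a .git folder; if none is found, GitPython raises
# InvalidGitRepositoryError, which is the error shown in the traceback above.
import git

try:
    repo = git.Repo(search_parent_directories=True)
    print("Found repository at:", repo.working_tree_dir)
except git.exc.InvalidGitRepositoryError:
    print("No git repository found above the current working directory")
```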



## Expected behavior

The fine-tuning script should run without raising this error.

gianlucabusatta commented 3 years ago

A workaround could be creating a git repository and making at least one commit.
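
Something like this might work (an untested sketch; it assumes the working directory is /content, as implied by the relative paths in the commands above):

```
# Untested workaround sketch: create a git repository with at least one commit above
# the working directory, so git.Repo(search_parent_directories=True) can find it.
!git init /content
!git -C /content config user.email "you@example.com"
!git -C /content config user.name "Your Name"
!git -C /content commit --allow-empty -m "dummy commit so get_git_info() finds a HEAD"
```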

lhoestq commented 3 years ago

Hi! Can you try running the command from inside the transformers directory? This way it will be able to find the git info of the transformers repo.
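
On Colab that could look something like this (an illustrative sketch; the paths are the ones from the reproduction steps above):

```
# Change into the cloned transformers repository first so that get_git_info() can
# locate its .git directory, then launch the fine-tuning script from there.
%cd /content/transformers
!python examples/research_projects/rag/finetune_rag.py \
    --data_dir /content/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data \
    --output_dir /content/finetune_output \
    --model_name_or_path /content/checkpoint \
    --model_type rag_token \
    --fp16 \
    --use_dummy_dataset True
```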