NeuromatchAcademy / course-content-dl

NMA deep learning course
https://deeplearning.neuromatch.io/

W2D4: Errors in the book #544

Closed · spirosChv closed this issue 2 years ago

spirosChv commented 2 years ago

Several errors appear in the book:

1. Setup --> Imports: FileNotFoundError

```python
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _dep_map(self)
   3030         try:
-> 3031             return self.__dep_map
   3032         except AttributeError:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in __getattr__(self, attr)
   2824         if attr.startswith('_'):
-> 2825             raise AttributeError(attr)
   2826         return getattr(self._provider, attr)

AttributeError: _DistInfoDistribution__dep_map

During handling of the above exception, another exception occurred:

AttributeError                            Traceback (most recent call last)
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _parsed_pkg_info(self)
   3021         try:
-> 3022             return self._pkg_info
   3023         except AttributeError:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in __getattr__(self, attr)
   2824         if attr.startswith('_'):
-> 2825             raise AttributeError(attr)
   2826         return getattr(self._provider, attr)

AttributeError: _pkg_info

During handling of the above exception, another exception occurred:

FileNotFoundError                         Traceback (most recent call last)
/tmp/ipykernel_74829/233875289.py in <module>
     29 
     30 # textattack
---> 31 from textattack.transformations import WordSwapQWERTY
     32 from textattack.transformations import WordSwapExtend
     33 from textattack.transformations import WordSwapContract

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/__init__.py in <module>
      9 TextAttack provides components for common NLP tasks like sentence encoding, grammar-checking, and word replacement that can be used on their own.
     10 """
---> 11 from .attack_args import AttackArgs, CommandLineAttackArgs
     12 from .augment_args import AugmenterArgs
     13 from .dataset_args import DatasetArgs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_args.py in <module>
     11 
     12 import textattack
---> 13 from textattack.shared.utils import ARGS_SPLIT_TOKEN, load_module_from_file
     14 
     15 from .attack import Attack

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/__init__.py in <module>
     13 from . import validators
     14 
---> 15 from .attacked_text import AttackedText
     16 from .word_embeddings import AbstractWordEmbedding, WordEmbedding, GensimWordEmbedding
     17 from .checkpoint import AttackCheckpoint

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/attacked_text.py in <module>
     10 import math
     11 
---> 12 import flair
     13 from flair.data import Sentence
     14 import numpy as np

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/__init__.py in <module>
     18 
     19 from . import data
---> 20 from . import models
     21 from . import visual
     22 from . import trainers

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/models/__init__.py in <module>
----> 1 from .sequence_tagger_model import SequenceTagger, MultiTagger
      2 from .language_model import LanguageModel
      3 from .text_classification_model import TextClassifier
      4 from .pairwise_classification_model import TextPairClassifier
      5 from .relation_extractor_model import RelationExtractor

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/models/sequence_tagger_model.py in <module>
     15 from tqdm import tqdm
     16 
---> 17 import flair.nn
     18 from flair.data import Dictionary, Sentence, Label
     19 from flair.datasets import SentenceDataset, DataLoader

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/nn/__init__.py in <module>
      1 from .dropout import LockedDropout, WordDropout
----> 2 from .model import Model, Classifier, DefaultClassifier

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/nn/model.py in <module>
     14 from flair import file_utils
     15 from flair.data import DataPoint, Sentence, Dictionary, SpanLabel
---> 16 from flair.datasets import DataLoader, SentenceDataset
     17 from flair.training_utils import Result, store_embeddings
     18 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/datasets/__init__.py in <module>
    275 
    276 # Expose all relation extraction datasets
--> 277 from .relation_extraction import RE_ENGLISH_SEMEVAL2010
    278 from .relation_extraction import RE_ENGLISH_TACRED
    279 from .relation_extraction import RE_ENGLISH_CONLL04

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/datasets/relation_extraction.py in <module>
     10 
     11 import conllu
---> 12 import gdown
     13 
     14 import flair

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/gdown/__init__.py in <module>
      9 
     10 __author__ = "Kentaro Wada <www.kentaro.wada@gmail.com>"
---> 11 __version__ = pkg_resources.get_distribution("gdown").version

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in get_distribution(dist)
    480         dist = Requirement.parse(dist)
    481     if isinstance(dist, Requirement):
--> 482         dist = get_provider(dist)
    483     if not isinstance(dist, Distribution):
    484         raise TypeError("Expected string, Requirement, or Distribution", dist)

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in get_provider(moduleOrReq)
    356     """Return an IResourceProvider for the named module or requirement"""
    357     if isinstance(moduleOrReq, Requirement):
--> 358         return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    359     try:
    360         module = sys.modules[moduleOrReq]

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in require(self, *requirements)
    899         included, even if they were already activated in this working set.
    900         """
--> 901         needed = self.resolve(parse_requirements(requirements))
    902 
    903         for dist in needed:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in resolve(self, requirements, env, installer, replace_conflicting, extras)
    793 
    794             # push the new requirements onto the stack
--> 795             new_requirements = dist.requires(req.extras)[::-1]
    796             requirements.extend(new_requirements)
    797 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in requires(self, extras)
   2744     def requires(self, extras=()):
   2745         """List of Requirements needed for this distro if `extras` are used"""
-> 2746         dm = self._dep_map
   2747         deps = []
   2748         deps.extend(dm.get(None, ()))

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _dep_map(self)
   3031             return self.__dep_map
   3032         except AttributeError:
-> 3033             self.__dep_map = self._compute_dependencies()
   3034             return self.__dep_map
   3035 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _compute_dependencies(self)
   3040         reqs = []
   3041         # Including any condition expressions
-> 3042         for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
   3043             reqs.extend(parse_requirements(req))
   3044 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _parsed_pkg_info(self)
   3022             return self._pkg_info
   3023         except AttributeError:
-> 3024             metadata = self.get_metadata(self.PKG_INFO)
   3025             self._pkg_info = email.parser.Parser().parsestr(metadata)
   3026             return self._pkg_info

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in get_metadata(self, name)
   1420             return ""
   1421         path = self._get_metadata_path(name)
-> 1422         value = self._get(path)
   1423         if six.PY2:
   1424             return value

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _get(self, path)
   1625 
   1626     def _get(self, path):
-> 1627         with open(path, 'rb') as stream:
   1628             return stream.read()
   1629 

FileNotFoundError: [Errno 2] No such file or directory: '/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/tqdm-4.62.3.dist-info/METADATA'
```
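The final `FileNotFoundError` (a missing `tqdm-4.62.3.dist-info/METADATA`) typically means a package was upgraded or reinstalled after `pkg_resources` first scanned `site-packages`, so its cached working set points at metadata that no longer exists on disk. A minimal workaround sketch under that assumption (restarting the kernel after the install cell is the more reliable fix):

```python
# Hedged workaround: rebuild pkg_resources' cached working set from what is
# actually installed on disk, then retry the failing import.
import importlib

import pkg_resources

importlib.reload(pkg_resources)  # rescans site-packages for fresh dist-info
from textattack.transformations import WordSwapQWERTY  # retry the import
```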
2. Load Yelp dataset (manual load of the Yelp dataset): PermissionError

```python
    
---------------------------------------------------------------------------
PermissionError                           Traceback (most recent call last)
/tmp/ipykernel_74829/1089349718.py in <module>
     33 
     34   with tarfile.open(fname) as ft:
---> 35     ft.extractall('/root/.cache')
     36   print('Files have been extracted.')

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/tarfile.py in extractall(self, path, members, numeric_owner)
   2000             # Do not set_attrs directories, as we will do that further down
   2001             self.extract(tarinfo, path, set_attrs=not tarinfo.isdir(),
-> 2002                          numeric_owner=numeric_owner)
   2003 
   2004         # Reverse sort directories.

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/tarfile.py in extract(self, member, path, set_attrs, numeric_owner)
   2042             self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
   2043                                  set_attrs=set_attrs,
-> 2044                                  numeric_owner=numeric_owner)
   2045         except OSError as e:
   2046             if self.errorlevel > 0:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/tarfile.py in _extract_member(self, tarinfo, targetpath, set_attrs, numeric_owner)
   2104                 # Create directories that are not part of the archive with
   2105                 # default permissions.
-> 2106                 os.makedirs(upperdirs)
   2107 
   2108         if tarinfo.islnk() or tarinfo.issym():

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/os.py in makedirs(name, mode, exist_ok)
    221             return
    222         try:
--> 223             mkdir(name, mode)
    224         except OSError:
    225             # Cannot rely on checking for EEXIST, since the operating system

PermissionError: [Errno 13] Permission denied: '/root/.cache'
```
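The extraction fails because the hosted build environment does not run as root, so the hard-coded `/root/.cache` is not writable. A sketch of one possible fix (not necessarily the change applied in #554): extract into the current user's cache directory instead. `fname` is the tarball path from the download cell above.

```python
# Sketch: extract into a user-writable cache directory instead of the
# hard-coded '/root/.cache'. `fname` comes from the notebook's download cell.
import os
import tarfile

cache_dir = os.path.expanduser('~/.cache')  # resolves to the current user's home
os.makedirs(cache_dir, exist_ok=True)

with tarfile.open(fname) as ft:
    ft.extractall(cache_dir)
print('Files have been extracted.')
```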


3. Bonus 1: Language modeling as pre-training: RuntimeError
```python
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_74829/2608392114.py in <module>
     19 # @markdown **NOTE:** *Some pre-trained models might not work well with longer texts!*
     20 
---> 21 generated_responses = generator(input_text, max_length=512, num_return_sequences=num_output_responses)
     22 
     23 print("\n *********** INPUT PROMPT TO THE MODEL ************ \n")

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/pipelines/text_generation.py in __call__(self, text_inputs, **kwargs)
    169               -- The token ids of the generated text.
    170         """
--> 171         return super().__call__(text_inputs, **kwargs)
    172 
    173     def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/pipelines/base.py in __call__(self, inputs, num_workers, batch_size, *args, **kwargs)
   1099                 return self.iterate(inputs, preprocess_params, forward_params, postprocess_params)
   1100         else:
-> 1101             return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
   1102 
   1103     def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params):

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/pipelines/base.py in run_single(self, inputs, preprocess_params, forward_params, postprocess_params)
   1106     def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
   1107         model_inputs = self.preprocess(inputs, **preprocess_params)
-> 1108         model_outputs = self.forward(model_inputs, **forward_params)
   1109         outputs = self.postprocess(model_outputs, **postprocess_params)
   1110         return outputs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/pipelines/base.py in forward(self, model_inputs, **forward_params)
   1032                 with inference_context():
   1033                     model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
-> 1034                     model_outputs = self._forward(model_inputs, **forward_params)
   1035                     model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu"))
   1036             else:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/pipelines/text_generation.py in _forward(self, model_inputs, **generate_kwargs)
    204             input_ids = None
    205         prompt_text = model_inputs.pop("prompt_text")
--> 206         generated_sequence = self.model.generate(input_ids=input_ids, **generate_kwargs)  # BS x SL
    207         return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    208 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)
     26         def decorate_context(*args, **kwargs):
     27             with self.__class__():
---> 28                 return func(*args, **kwargs)
     29         return cast(F, decorate_context)
     30 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/generation_utils.py in generate(self, inputs, max_length, min_length, do_sample, early_stopping, num_beams, temperature, top_k, top_p, repetition_penalty, bad_words_ids, bos_token_id, pad_token_id, eos_token_id, length_penalty, no_repeat_ngram_size, encoder_no_repeat_ngram_size, num_return_sequences, max_time, max_new_tokens, decoder_start_token_id, use_cache, num_beam_groups, diversity_penalty, prefix_allowed_tokens_fn, logits_processor, stopping_criteria, output_attentions, output_hidden_states, output_scores, return_dict_in_generate, forced_bos_token_id, forced_eos_token_id, remove_invalid_values, synced_gpus, **model_kwargs)
   1144                 return_dict_in_generate=return_dict_in_generate,
   1145                 synced_gpus=synced_gpus,
-> 1146                 **model_kwargs,
   1147             )
   1148 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/generation_utils.py in sample(self, input_ids, logits_processor, stopping_criteria, logits_warper, max_length, pad_token_id, eos_token_id, output_attentions, output_hidden_states, output_scores, return_dict_in_generate, synced_gpus, **model_kwargs)
   1681             # sample
   1682             probs = nn.functional.softmax(next_token_scores, dim=-1)
-> 1683             next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
   1684 
   1685             # finished sentences should have their next token be a padding token

RuntimeError: Inplace update to inference tensor outside InferenceMode is not allowed.You can make a clone to get a normal tensor before doing inplace update.See https://github.com/pytorch/rfcs/pull/17 for more details.
```
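The sampling step performs an inplace update on tensors that the pipeline created under `torch.inference_mode()`, which PyTorch forbids once execution leaves that mode. One hedged workaround sketch is to run the whole pipeline call inside `torch.inference_mode()` yourself (pinning compatible `torch`/`transformers` versions is another route); `generator`, `input_text`, and `num_output_responses` are the objects from the notebook cell above.

```python
# Hedged sketch: keep the entire generation call inside inference mode so the
# inplace update happens in the same mode the tensors were created in.
import torch

with torch.inference_mode():
    generated_responses = generator(
        input_text,
        max_length=512,
        num_return_sequences=num_output_responses,
    )
```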
4. Bonus 2.3: Fine-tuning: ValueError
    
```python
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
/tmp/ipykernel_74829/2987933151.py in <module>
     11                                   save_steps=50,
     12                                   logging_steps=10,
---> 13                                   report_to="tensorboard"
     14                                   )

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in __init__(self, output_dir, overwrite_output_dir, do_train, do_eval, do_predict, evaluation_strategy, prediction_loss_only, per_device_train_batch_size, per_device_eval_batch_size, per_gpu_train_batch_size, per_gpu_eval_batch_size, gradient_accumulation_steps, eval_accumulation_steps, learning_rate, weight_decay, adam_beta1, adam_beta2, adam_epsilon, max_grad_norm, num_train_epochs, max_steps, lr_scheduler_type, warmup_ratio, warmup_steps, log_level, log_level_replica, log_on_each_node, logging_dir, logging_strategy, logging_first_step, logging_steps, logging_nan_inf_filter, save_strategy, save_steps, save_total_limit, save_on_each_node, no_cuda, seed, bf16, fp16, fp16_opt_level, half_precision_backend, bf16_full_eval, fp16_full_eval, tf32, local_rank, xpu_backend, tpu_num_cores, tpu_metrics_debug, debug, dataloader_drop_last, eval_steps, dataloader_num_workers, past_index, run_name, disable_tqdm, remove_unused_columns, label_names, load_best_model_at_end, metric_for_best_model, greater_is_better, ignore_data_skip, sharded_ddp, deepspeed, label_smoothing_factor, adafactor, group_by_length, length_column_name, report_to, ddp_find_unused_parameters, ddp_bucket_cap_mb, dataloader_pin_memory, skip_memory_metrics, use_legacy_prediction_loop, push_to_hub, resume_from_checkpoint, hub_model_id, hub_strategy, hub_token, gradient_checkpointing, fp16_backend, push_to_hub_model_id, push_to_hub_organization, push_to_hub_token, mp_parameters)

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in __post_init__(self)
    828         ):
    829             raise ValueError(
--> 830                 "Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices."
    831             )
    832 

ValueError: Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices.
```
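As noted later in this thread, the fix is to request mixed precision only when CUDA is actually available. A sketch of the corrected cell; the argument values other than `fp16` are taken from the traceback above, and `output_dir` is a hypothetical placeholder.

```python
import torch
from transformers import TrainingArguments

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

training_args = TrainingArguments(
    output_dir='finetuning_output',  # hypothetical placeholder path
    save_steps=50,
    logging_steps=10,
    report_to="tensorboard",
    fp16=False if DEVICE == 'cpu' else True,  # the fix from this thread
)
```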


5. Bonus 3.1: `Augmenter Class`: AttributeError
```python
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _dep_map(self)
   3030         try:
-> 3031             return self.__dep_map
   3032         except AttributeError:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in __getattr__(self, attr)
   2824         if attr.startswith('_'):
-> 2825             raise AttributeError(attr)
   2826         return getattr(self._provider, attr)

AttributeError: _DistInfoDistribution__dep_map

During handling of the above exception, another exception occurred:

AttributeError                            Traceback (most recent call last)
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _parsed_pkg_info(self)
   3021         try:
-> 3022             return self._pkg_info
   3023         except AttributeError:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in __getattr__(self, attr)
   2824         if attr.startswith('_'):
-> 2825             raise AttributeError(attr)
   2826         return getattr(self._provider, attr)

AttributeError: _pkg_info

During handling of the above exception, another exception occurred:

FileNotFoundError                         Traceback (most recent call last)
/tmp/ipykernel_74829/1241122631.py in <module>
      3 ===================
      4 """
----> 5 from textattack.constraints import PreTransformationConstraint
      6 from textattack.shared import AttackedText, utils
      7 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/__init__.py in <module>
      9 TextAttack provides components for common NLP tasks like sentence encoding, grammar-checking, and word replacement that can be used on their own.
     10 """
---> 11 from .attack_args import AttackArgs, CommandLineAttackArgs
     12 from .augment_args import AugmenterArgs
     13 from .dataset_args import DatasetArgs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_args.py in <module>
     13 from textattack.shared.utils import ARGS_SPLIT_TOKEN, load_module_from_file
     14 
---> 15 from .attack import Attack
     16 from .dataset_args import DatasetArgs
     17 from .model_args import ModelArgs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack.py in <module>
     11 
     12 import textattack
---> 13 from textattack.attack_results import (
     14     FailedAttackResult,
     15     MaximizedAttackResult,

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py in <module>
      6 """
      7 
----> 8 from .attack_result import AttackResult
      9 from .maximized_attack_result import MaximizedAttackResult
     10 from .failed_attack_result import FailedAttackResult

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/attack_result.py in <module>
      8 
      9 from textattack.goal_function_results import GoalFunctionResult
---> 10 from textattack.shared import utils
     11 
     12 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/__init__.py in <module>
     13 from . import validators
     14 
---> 15 from .attacked_text import AttackedText
     16 from .word_embeddings import AbstractWordEmbedding, WordEmbedding, GensimWordEmbedding
     17 from .checkpoint import AttackCheckpoint

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/attacked_text.py in <module>
     10 import math
     11 
---> 12 import flair
     13 from flair.data import Sentence
     14 import numpy as np

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/__init__.py in <module>
     18 
     19 from . import data
---> 20 from . import models
     21 from . import visual
     22 from . import trainers

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/models/__init__.py in <module>
----> 1 from .sequence_tagger_model import SequenceTagger, MultiTagger
      2 from .language_model import LanguageModel
      3 from .text_classification_model import TextClassifier
      4 from .pairwise_classification_model import TextPairClassifier
      5 from .relation_extractor_model import RelationExtractor

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/models/sequence_tagger_model.py in <module>
     15 from tqdm import tqdm
     16 
---> 17 import flair.nn
     18 from flair.data import Dictionary, Sentence, Label
     19 from flair.datasets import SentenceDataset, DataLoader

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/nn/__init__.py in <module>
      1 from .dropout import LockedDropout, WordDropout
----> 2 from .model import Model, Classifier, DefaultClassifier

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/nn/model.py in <module>
     14 from flair import file_utils
     15 from flair.data import DataPoint, Sentence, Dictionary, SpanLabel
---> 16 from flair.datasets import DataLoader, SentenceDataset
     17 from flair.training_utils import Result, store_embeddings
     18 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/datasets/__init__.py in <module>
    275 
    276 # Expose all relation extraction datasets
--> 277 from .relation_extraction import RE_ENGLISH_SEMEVAL2010
    278 from .relation_extraction import RE_ENGLISH_TACRED
    279 from .relation_extraction import RE_ENGLISH_CONLL04

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/flair/datasets/relation_extraction.py in <module>
     10 
     11 import conllu
---> 12 import gdown
     13 
     14 import flair

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/gdown/__init__.py in <module>
      9 
     10 __author__ = "Kentaro Wada <www.kentaro.wada@gmail.com>"
---> 11 __version__ = pkg_resources.get_distribution("gdown").version

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in get_distribution(dist)
    480         dist = Requirement.parse(dist)
    481     if isinstance(dist, Requirement):
--> 482         dist = get_provider(dist)
    483     if not isinstance(dist, Distribution):
    484         raise TypeError("Expected string, Requirement, or Distribution", dist)

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in get_provider(moduleOrReq)
    356     """Return an IResourceProvider for the named module or requirement"""
    357     if isinstance(moduleOrReq, Requirement):
--> 358         return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    359     try:
    360         module = sys.modules[moduleOrReq]

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in require(self, *requirements)
    899         included, even if they were already activated in this working set.
    900         """
--> 901         needed = self.resolve(parse_requirements(requirements))
    902 
    903         for dist in needed:

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in resolve(self, requirements, env, installer, replace_conflicting, extras)
    793 
    794             # push the new requirements onto the stack
--> 795             new_requirements = dist.requires(req.extras)[::-1]
    796             requirements.extend(new_requirements)
    797 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in requires(self, extras)
   2744     def requires(self, extras=()):
   2745         """List of Requirements needed for this distro if `extras` are used"""
-> 2746         dm = self._dep_map
   2747         deps = []
   2748         deps.extend(dm.get(None, ()))

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _dep_map(self)
   3031             return self.__dep_map
   3032         except AttributeError:
-> 3033             self.__dep_map = self._compute_dependencies()
   3034             return self.__dep_map
   3035 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _compute_dependencies(self)
   3040         reqs = []
   3041         # Including any condition expressions
-> 3042         for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
   3043             reqs.extend(parse_requirements(req))
   3044 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _parsed_pkg_info(self)
   3022             return self._pkg_info
   3023         except AttributeError:
-> 3024             metadata = self.get_metadata(self.PKG_INFO)
   3025             self._pkg_info = email.parser.Parser().parsestr(metadata)
   3026             return self._pkg_info

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in get_metadata(self, name)
   1420             return ""
   1421         path = self._get_metadata_path(name)
-> 1422         value = self._get(path)
   1423         if six.PY2:
   1424             return value

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/pkg_resources/__init__.py in _get(self, path)
   1625 
   1626     def _get(self, path):
-> 1627         with open(path, 'rb') as stream:
   1628             return stream.read()
   1629 

FileNotFoundError: [Errno 2] No such file or directory: '/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/urllib3-1.26.7.dist-info/METADATA'
```
spirosChv commented 2 years ago

Errors 2, 3, and 5 have been resolved in https://github.com/NeuromatchAcademy/course-content-dl/pull/554.

spirosChv commented 2 years ago

Bonus 3.1: ImportError

```python
ImportError                               Traceback (most recent call last)
/tmp/ipykernel_75600/1241122631.py in <module>
      3 ===================
      4 """
----> 5 from textattack.constraints import PreTransformationConstraint
      6 from textattack.shared import AttackedText, utils
      7 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/__init__.py in <module>
      9 TextAttack provides components for common NLP tasks like sentence encoding, grammar-checking, and word replacement that can be used on their own.
     10 """
---> 11 from .attack_args import AttackArgs, CommandLineAttackArgs
     12 from .augment_args import AugmenterArgs
     13 from .dataset_args import DatasetArgs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_args.py in <module>
     13 from textattack.shared.utils import ARGS_SPLIT_TOKEN, load_module_from_file
     14 
---> 15 from .attack import Attack
     16 from .dataset_args import DatasetArgs
     17 from .model_args import ModelArgs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack.py in <module>
     11 
     12 import textattack
---> 13 from textattack.attack_results import (
     14     FailedAttackResult,
     15     MaximizedAttackResult,

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py in <module>
      6 """
      7 
----> 8 from .attack_result import AttackResult
      9 from .maximized_attack_result import MaximizedAttackResult
     10 from .failed_attack_result import FailedAttackResult

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/attack_result.py in <module>
      8 
      9 from textattack.goal_function_results import GoalFunctionResult
---> 10 from textattack.shared import utils
     11 
     12 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/__init__.py in <module>
     15 from .attacked_text import AttackedText
     16 from .word_embeddings import AbstractWordEmbedding, WordEmbedding, GensimWordEmbedding
---> 17 from .checkpoint import AttackCheckpoint

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/checkpoint.py in <module>
     12 
     13 import textattack
---> 14 from textattack.attack_results import (
     15     FailedAttackResult,
     16     MaximizedAttackResult,

ImportError: cannot import name 'FailedAttackResult' from 'textattack.attack_results' (/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py)
```

spirosChv commented 2 years ago

The `textattack` import error is a known issue on Kaggle; see https://github.com/QData/TextAttack/issues/584. The code runs on Colab.

spirosChv commented 2 years ago

https://github.com/NeuromatchAcademy/course-content-dl/pull/580 resolved the issue on Kaggle.

spirosChv commented 2 years ago

`tensorboard` is missing from the Install dependencies cell, so an error occurs in the book.
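A minimal sketch of the likely remedy, assuming the book's usual `pip` install cell (the exact cell contents are not shown in this issue):

```python
# Hypothetical addition to the "Install dependencies" cell.
!pip install tensorboard --quiet
```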

spirosChv commented 2 years ago

7. Bonus 2.3: Fine-tuning: ValueError

```python
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
/tmp/ipykernel_74829/2987933151.py in <module>
     11                                   save_steps=50,
     12                                   logging_steps=10,
---> 13                                   report_to="tensorboard"
     14                                   )

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in __init__(self, output_dir, overwrite_output_dir, do_train, do_eval, do_predict, evaluation_strategy, prediction_loss_only, per_device_train_batch_size, per_device_eval_batch_size, per_gpu_train_batch_size, per_gpu_eval_batch_size, gradient_accumulation_steps, eval_accumulation_steps, learning_rate, weight_decay, adam_beta1, adam_beta2, adam_epsilon, max_grad_norm, num_train_epochs, max_steps, lr_scheduler_type, warmup_ratio, warmup_steps, log_level, log_level_replica, log_on_each_node, logging_dir, logging_strategy, logging_first_step, logging_steps, logging_nan_inf_filter, save_strategy, save_steps, save_total_limit, save_on_each_node, no_cuda, seed, bf16, fp16, fp16_opt_level, half_precision_backend, bf16_full_eval, fp16_full_eval, tf32, local_rank, xpu_backend, tpu_num_cores, tpu_metrics_debug, debug, dataloader_drop_last, eval_steps, dataloader_num_workers, past_index, run_name, disable_tqdm, remove_unused_columns, label_names, load_best_model_at_end, metric_for_best_model, greater_is_better, ignore_data_skip, sharded_ddp, deepspeed, label_smoothing_factor, adafactor, group_by_length, length_column_name, report_to, ddp_find_unused_parameters, ddp_bucket_cap_mb, dataloader_pin_memory, skip_memory_metrics, use_legacy_prediction_loop, push_to_hub, resume_from_checkpoint, hub_model_id, hub_strategy, hub_token, gradient_checkpointing, fp16_backend, push_to_hub_model_id, push_to_hub_organization, push_to_hub_token, mp_parameters)

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in __post_init__(self)
    828         ):
    829             raise ValueError(
--> 830                 "Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices."
    831             )
    832 

ValueError: Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices.
```

This issue has been resolved by adding:

```python
fp16=False if DEVICE == 'cpu' else True
```

to the `TrainingArguments` in Bonus 2.3.

spirosChv commented 2 years ago

Bonus 3.1: ImportError

ImportError                               Traceback (most recent call last)
/tmp/ipykernel_75600/1241122631.py in <module>
      3 ===================
      4 """
----> 5 from textattack.constraints import PreTransformationConstraint
      6 from textattack.shared import AttackedText, utils
      7 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/__init__.py in <module>
      9 TextAttack provides components for common NLP tasks like sentence encoding, grammar-checking, and word replacement that can be used on their own.
     10 """
---> 11 from .attack_args import AttackArgs, CommandLineAttackArgs
     12 from .augment_args import AugmenterArgs
     13 from .dataset_args import DatasetArgs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_args.py in <module>
     13 from textattack.shared.utils import ARGS_SPLIT_TOKEN, load_module_from_file
     14 
---> 15 from .attack import Attack
     16 from .dataset_args import DatasetArgs
     17 from .model_args import ModelArgs

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack.py in <module>
     11 
     12 import textattack
---> 13 from textattack.attack_results import (
     14     FailedAttackResult,
     15     MaximizedAttackResult,

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py in <module>
      6 """
      7 
----> 8 from .attack_result import AttackResult
      9 from .maximized_attack_result import MaximizedAttackResult
     10 from .failed_attack_result import FailedAttackResult

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/attack_result.py in <module>
      8 
      9 from textattack.goal_function_results import GoalFunctionResult
---> 10 from textattack.shared import utils
     11 
     12 

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/__init__.py in <module>
     15 from .attacked_text import AttackedText
     16 from .word_embeddings import AbstractWordEmbedding, WordEmbedding, GensimWordEmbedding
---> 17 from .checkpoint import AttackCheckpoint

/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/checkpoint.py in <module>
     12 
     13 import textattack
---> 14 from textattack.attack_results import (
     15     FailedAttackResult,
     16     MaximizedAttackResult,

ImportError: cannot import name 'FailedAttackResult' from 'textattack.attack_results' (/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py)

Fixed in https://github.com/NeuromatchAcademy/course-content-dl/pull/580

spirosChv commented 2 years ago

Fixed with this workflow run https://github.com/NeuromatchAcademy/course-content-dl/actions/runs/1697719525