Closed spirosChv closed 2 years ago
Issues 2, 3, and 5 have been resolved in https://github.com/NeuromatchAcademy/course-content-dl/pull/554
Bonus 3.1: ImportError
ImportError Traceback (most recent call last)
/tmp/ipykernel_75600/1241122631.py in <module>
3 ===================
4 """
----> 5 from textattack.constraints import PreTransformationConstraint
6 from textattack.shared import AttackedText, utils
7
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/__init__.py in <module>
9 TextAttack provides components for common NLP tasks like sentence encoding, grammar-checking, and word replacement that can be used on their own.
10 """
---> 11 from .attack_args import AttackArgs, CommandLineAttackArgs
12 from .augment_args import AugmenterArgs
13 from .dataset_args import DatasetArgs
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_args.py in <module>
13 from textattack.shared.utils import ARGS_SPLIT_TOKEN, load_module_from_file
14
---> 15 from .attack import Attack
16 from .dataset_args import DatasetArgs
17 from .model_args import ModelArgs
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack.py in <module>
11
12 import textattack
---> 13 from textattack.attack_results import (
14 FailedAttackResult,
15 MaximizedAttackResult,
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py in <module>
6 """
7
----> 8 from .attack_result import AttackResult
9 from .maximized_attack_result import MaximizedAttackResult
10 from .failed_attack_result import FailedAttackResult
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/attack_result.py in <module>
8
9 from textattack.goal_function_results import GoalFunctionResult
---> 10 from textattack.shared import utils
11
12
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/__init__.py in <module>
15 from .attacked_text import AttackedText
16 from .word_embeddings import AbstractWordEmbedding, WordEmbedding, GensimWordEmbedding
---> 17 from .checkpoint import AttackCheckpoint
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/checkpoint.py in <module>
12
13 import textattack
---> 14 from textattack.attack_results import (
15 FailedAttackResult,
16 MaximizedAttackResult,
ImportError: cannot import name 'FailedAttackResult' from 'textattack.attack_results' (/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py)
The `textattack` import error is a known reported issue on Kaggle; see https://github.com/QData/TextAttack/issues/584. The code runs correctly on Colab.
https://github.com/NeuromatchAcademy/course-content-dl/pull/580 resolved the issue on Kaggle.
`tensorboard` is missing from the "Install dependencies" section, so an error occurs in the book.
7. Bonus 2.3: Fine-tuning: Value Error
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) /tmp/ipykernel_74829/2987933151.py in <module> 11 save_steps=50, 12 logging_steps=10, ---> 13 report_to="tensorboard" 14 ) /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in __init__(self, output_dir, overwrite_output_dir, do_train, do_eval, do_predict, evaluation_strategy, prediction_loss_only, per_device_train_batch_size, per_device_eval_batch_size, per_gpu_train_batch_size, per_gpu_eval_batch_size, gradient_accumulation_steps, eval_accumulation_steps, learning_rate, weight_decay, adam_beta1, adam_beta2, adam_epsilon, max_grad_norm, num_train_epochs, max_steps, lr_scheduler_type, warmup_ratio, warmup_steps, log_level, log_level_replica, log_on_each_node, logging_dir, logging_strategy, logging_first_step, logging_steps, logging_nan_inf_filter, save_strategy, save_steps, save_total_limit, save_on_each_node, no_cuda, seed, bf16, fp16, fp16_opt_level, half_precision_backend, bf16_full_eval, fp16_full_eval, tf32, local_rank, xpu_backend, tpu_num_cores, tpu_metrics_debug, debug, dataloader_drop_last, eval_steps, dataloader_num_workers, past_index, run_name, disable_tqdm, remove_unused_columns, label_names, load_best_model_at_end, metric_for_best_model, greater_is_better, ignore_data_skip, sharded_ddp, deepspeed, label_smoothing_factor, adafactor, group_by_length, length_column_name, report_to, ddp_find_unused_parameters, ddp_bucket_cap_mb, dataloader_pin_memory, skip_memory_metrics, use_legacy_prediction_loop, push_to_hub, resume_from_checkpoint, hub_model_id, hub_strategy, hub_token, gradient_checkpointing, fp16_backend, push_to_hub_model_id, push_to_hub_organization, push_to_hub_token, mp_parameters) /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in __post_init__(self) 828 ): 829 raise ValueError( --> 830 "Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices." 831 ) 832 ValueError: Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices.
This issue has been resolved by adding `fp16=False if DEVICE=='cpu' else True` to the `TrainingArguments` in Bonus 2.3.
Bonus 3.1:
ImportError
ImportError Traceback (most recent call last) /tmp/ipykernel_75600/1241122631.py in <module> 3 =================== 4 """ ----> 5 from textattack.constraints import PreTransformationConstraint 6 from textattack.shared import AttackedText, utils 7 /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/__init__.py in <module> 9 TextAttack provides components for common NLP tasks like sentence encoding, grammar-checking, and word replacement that can be used on their own. 10 """ ---> 11 from .attack_args import AttackArgs, CommandLineAttackArgs 12 from .augment_args import AugmenterArgs 13 from .dataset_args import DatasetArgs /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_args.py in <module> 13 from textattack.shared.utils import ARGS_SPLIT_TOKEN, load_module_from_file 14 ---> 15 from .attack import Attack 16 from .dataset_args import DatasetArgs 17 from .model_args import ModelArgs /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack.py in <module> 11 12 import textattack ---> 13 from textattack.attack_results import ( 14 FailedAttackResult, 15 MaximizedAttackResult, /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py in <module> 6 """ 7 ----> 8 from .attack_result import AttackResult 9 from .maximized_attack_result import MaximizedAttackResult 10 from .failed_attack_result import FailedAttackResult /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/attack_result.py in <module> 8 9 from textattack.goal_function_results import GoalFunctionResult ---> 10 from textattack.shared import utils 11 12 /opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/__init__.py in <module> 15 from .attacked_text import AttackedText 16 from .word_embeddings import AbstractWordEmbedding, WordEmbedding, GensimWordEmbedding ---> 17 from .checkpoint import AttackCheckpoint 
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/shared/checkpoint.py in <module> 12 13 import textattack ---> 14 from textattack.attack_results import ( 15 FailedAttackResult, 16 MaximizedAttackResult, ImportError: cannot import name 'FailedAttackResult' from 'textattack.attack_results' (/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/textattack/attack_results/__init__.py)
Fixed in https://github.com/NeuromatchAcademy/course-content-dl/pull/580
Fixed with this workflow run https://github.com/NeuromatchAcademy/course-content-dl/actions/runs/1697719525
Several errors appear in the book:
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/tarfile.py in extractall(self, path, members, numeric_owner) 2000 # Do not set_attrs directories, as we will do that further down 2001 self.extract(tarinfo, path, set_attrs=not tarinfo.isdir(), -> 2002 numeric_owner=numeric_owner) 2003 2004 # Reverse sort directories.
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/tarfile.py in extract(self, member, path, set_attrs, numeric_owner) 2042 self._extract_member(tarinfo, os.path.join(path, tarinfo.name), 2043 set_attrs=set_attrs, -> 2044 numeric_owner=numeric_owner) 2045 except OSError as e: 2046 if self.errorlevel > 0:
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/tarfile.py in _extract_member(self, tarinfo, targetpath, set_attrs, numeric_owner) 2104 # Create directories that are not part of the archive with 2105 # default permissions. -> 2106 os.makedirs(upperdirs) 2107 2108 if tarinfo.islnk() or tarinfo.issym():
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/os.py in makedirs(name, mode, exist_ok) 221 return 222 try: --> 223 mkdir(name, mode) 224 except OSError: 225 # Cannot rely on checking for EEXIST, since the operating system
PermissionError: [Errno 13] Permission denied: '/root/.cache'
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in init(self, output_dir, overwrite_output_dir, do_train, do_eval, do_predict, evaluation_strategy, prediction_loss_only, per_device_train_batch_size, per_device_eval_batch_size, per_gpu_train_batch_size, per_gpu_eval_batch_size, gradient_accumulation_steps, eval_accumulation_steps, learning_rate, weight_decay, adam_beta1, adam_beta2, adam_epsilon, max_grad_norm, num_train_epochs, max_steps, lr_scheduler_type, warmup_ratio, warmup_steps, log_level, log_level_replica, log_on_each_node, logging_dir, logging_strategy, logging_first_step, logging_steps, logging_nan_inf_filter, save_strategy, save_steps, save_total_limit, save_on_each_node, no_cuda, seed, bf16, fp16, fp16_opt_level, half_precision_backend, bf16_full_eval, fp16_full_eval, tf32, local_rank, xpu_backend, tpu_num_cores, tpu_metrics_debug, debug, dataloader_drop_last, eval_steps, dataloader_num_workers, past_index, run_name, disable_tqdm, remove_unused_columns, label_names, load_best_model_at_end, metric_for_best_model, greater_is_better, ignore_data_skip, sharded_ddp, deepspeed, label_smoothing_factor, adafactor, group_by_length, length_column_name, report_to, ddp_find_unused_parameters, ddp_bucket_cap_mb, dataloader_pin_memory, skip_memory_metrics, use_legacy_prediction_loop, push_to_hub, resume_from_checkpoint, hub_model_id, hub_strategy, hub_token, gradient_checkpointing, fp16_backend, push_to_hub_model_id, push_to_hub_organization, push_to_hub_token, mp_parameters)
/opt/hostedtoolcache/Python/3.7.12/x64/lib/python3.7/site-packages/transformers/training_args.py in __post_init__(self) 828 ): 829 raise ValueError( --> 830 "Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices." 831 ) 832 ValueError: Mixed precision training with AMP or APEX (`--fp16` or `--bf16`) and half precision evaluation (`--fp16_full_eval` or `--bf16_full_eval`) can only be used on CUDA devices.