yongzx opened this issue 2 years ago
How to increase max_input_length and max_target_length? How to use mt5-xl?

Script 1: working script for mT5-large, using ds_config_zero3.json.
CUR_TIME=`date +%H%M_%m%d%Y`
deepspeed \
/users/zyong2/data/zyong2/mt0/data/external/mt0/multilingual_t0/main.py \
--deepspeed "/users/zyong2/data/zyong2/mt0/data/external/mt0/multilingual_t0/ds_config_zero3.json" \
--model_name_or_path "google/mt5-large" \
--cache_dir "/users/zyong2/data/zyong2/huggingface/mt5_xl" \
--dataset_name "mc4" \
--max_input_length 512 \
--max_target_length 512 \
--do_train \
--preprocessing_num_workers 4 \
--per_device_train_batch_size 1 \
--gradient_accumulation 8 \
--overwrite_output_dir \
--output_dir "/users/zyong2/data/zyong2/mt0/data/processed/001/mt5_xxl" \
--max_steps 100 \
--per_device_eval_batch_size 1 \
--do_eval \
--evaluation_strategy "steps" \
--eval_steps 10 \
--logging_dir "/users/zyong2/data/zyong2/mt0/data/processed/001/runs/mt5_xxl_${CUR_TIME}" \
--logging_strategy "steps" \
--logging_steps 10 \
--report_to "tensorboard"
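For context on the two length flags: they are presumably applied in main.py's preprocessing step. Below is a minimal sketch of how such a seq2seq preprocessing function typically looks; the function itself and the "inputs"/"targets" column names are illustrative and not taken from main.py.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/mt5-large")

def preprocess(examples, max_input_length=512, max_target_length=512):
    # Truncate the source side to max_input_length tokens.
    model_inputs = tokenizer(
        examples["inputs"],
        max_length=max_input_length,
        truncation=True,
    )
    # Tokenize the target side to max_target_length tokens.
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(
            examples["targets"],
            max_length=max_target_length,
            truncation=True,
        )
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

The ds_config_zero3.json used by this script: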
{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": "auto",
            "eps": "auto",
            "weight_decay": "auto"
        }
    },
    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "offload_param": {
            "device": "cpu",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e14,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_fp16_weights_on_model_save": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
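Before moving up to mT5-XL, it can help to estimate whether the ZeRO-3 model/optimizer states fit on the target GPUs. A rough sketch using DeepSpeed's memory estimator (this only covers parameter and optimizer states, not activations, so the 512/512 sequence lengths still add memory on top):

from transformers import AutoModelForSeq2SeqLM
from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live

# Load the model on CPU just to count parameters; no GPU is needed for the estimate.
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-xl")

# Prints per-GPU and CPU memory estimates for ZeRO-3 with and without offload.
estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)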
Script 2: mT5-XL with 512/512 input/target lengths and ZeRO-3, on 2 V100s.
CUR_TIME=`date +%H%M_%m%d%Y`
deepspeed \
/users/zyong2/data/zyong2/mt0/data/external/mt0/multilingual_t0/main.py \
--model_name_or_path "google/mt5-xl" \
--cache_dir "/users/zyong2/data/zyong2/huggingface/mt5_xl" \
--dataset_name "mc4" \
--max_input_length 512 \
--max_target_length 512 \
--do_train \
--preprocessing_num_workers 4 \
--per_device_train_batch_size 1 \
--gradient_accumulation 8 \
--overwrite_output_dir \
--output_dir "/users/zyong2/data/zyong2/mt0/data/processed/001/mt5_xl" \
--max_steps 100 \
--per_device_eval_batch_size 1 \
--do_eval \
--evaluation_strategy "steps" \
--eval_steps 10 \
--logging_dir "/users/zyong2/data/zyong2/mt0/data/processed/001/runs/mt5_xl_${CUR_TIME}" \
--logging_strategy "steps" \
--logging_steps 10 \
--report_to "tensorboard" \
--deepspeed "/users/zyong2/data/zyong2/mt0/data/external/mt0/multilingual_t0/ds_config_zero3.json"
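One thing to double-check: with "fp16": {"enabled": "auto"}, the HF integration (as I understand it) only turns fp16 on when --fp16 is passed on the command line, which neither script above does. Below is an assumed skeleton of how the --deepspeed flag is typically wired through the Trainer, not the actual main.py; dataset handling is omitted.

from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

# The "auto" entries in ds_config_zero3.json are filled from these arguments
# (e.g. fp16.enabled from fp16, train_micro_batch_size_per_gpu from
# per_device_train_batch_size) when the Trainer initializes DeepSpeed.
training_args = Seq2SeqTrainingArguments(
    output_dir="outputs/mt5_xl",          # illustrative path
    deepspeed="ds_config_zero3.json",     # same file passed via --deepspeed
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    fp16=True,                            # needed for "enabled": "auto" to activate fp16
    max_steps=100,
)

model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-xl")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-xl")

trainer = Seq2SeqTrainer(model=model, args=training_args, tokenizer=tokenizer)
# trainer.train()  # train_dataset omitted in this sketch

The ZeRO-3 config for this run: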
{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": "auto",
            "eps": "auto",
            "weight_decay": "auto"
        }
    },
    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "offload_param": {
            "device": "cpu",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e9,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_fp16_weights_on_model_save": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
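Side note on "stage3_gather_fp16_weights_on_model_save": true: that gathers an fp16 copy of the weights at save time. If full fp32 weights are needed later, DeepSpeed can (if I read the docs correctly) reconstruct them offline from the ZeRO-3 checkpoint shards, roughly like this (the checkpoint path is illustrative):

from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Consolidate the sharded ZeRO-3 states into a single fp32 state dict on CPU
# (this needs enough CPU RAM to hold the full model).
checkpoint_dir = "outputs/mt5_xl/checkpoint-100"  # illustrative path
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)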
Specs