Open wangbofan opened 1 year ago
```
python DataLoadAndTrain.py --LOSS_alpha=1 --lr=1e-5 --l2=1e-5 --early_stop=5 --PreTrain_Model="Gpt2" --batch_size=16
2023-03-01 10:52:17.583697: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libnvinfer.so.6'; dlerror: libnvinfer.so.6: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/nvidia/lib/:/usr/local/nvidia/lib64/
2023-03-01 10:52:17.583823: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libnvinfer_plugin.so.6'; dlerror: libnvinfer_plugin.so.6: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/nvidia/lib/:/usr/local/nvidia/lib64/
2023-03-01 10:52:17.583840: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:30] Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
==================== 超参 ====================
PreTrainModel = ['Bert_large', 'Gpt', 'Gpt2', 'Ctrl', 'TransfoXL', 'Xlnet_base', 'Xlnet_large', 'XLM', 'DistilBert_base', 'DistilBert_large', 'Roberta_base', 'Roberta_large', 'XLMRoberta_base', 'XLMRoberta_large', 'ALBERT-base-v1', 'ALBERT-large-v1', 'ALBERT-xlarge-v1', 'ALBERT-xxlarge-v1', 'ALBERT-base-v2', 'ALBERT-large-v2', 'ALBERT-xlarge-v2', 'ALBERT-xxlarge-v2']
early_stop = 5
lr = 1e-05
l2 = 1e-05
n_epochs = 50
logdir = logdir
trainset = data/train.json
devset = data/dev.json
testset = data/test.json
LOSS_alpha = 1.0
telegram_bot_token =
telegram_chat_id =
PreTrain_Model = Gpt2
model_path = Transformer-based-pretrained-model-for-event-extraction-master/save_model/latest_model.pt
batch_size = 16
Segmentation fault (core dumped)
(base) root@dl-230215085122e58-pod-jupyter-575dd95b75-wct2x:~/Transformer-based-pretrained-model-for-event-extraction#
```

Why does this problem occur?
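When the process dies with only "Segmentation fault (core dumped)" and no Python traceback, the standard-library `faulthandler` module can at least report which Python frame was active when the crash happened. A minimal sketch (the flags below are just the ones from the command above; `faulthandler` does not fix the crash, it only helps locate it):

```python
# Run the script with the fault handler enabled from the command line, e.g.:
#   python -X faulthandler DataLoadAndTrain.py --PreTrain_Model="Gpt2" --batch_size=16
# or enable it programmatically at the top of DataLoadAndTrain.py:
import faulthandler

faulthandler.enable()  # dumps the Python traceback to stderr on SIGSEGV and other fatal signals
```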
Solved already:

```
pip install sentencepiece==0.1.91
pip install torch==1.5.0
```
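For reference, a quick way to confirm that the pinned versions are actually the ones loaded in the active environment (a minimal sketch; the working version pair is the one reported in this issue and may differ on other setups):

```python
# Print the versions of the two packages involved in the fix.
import sentencepiece
import torch

print("torch:", torch.__version__)                   # expected: 1.5.0
print("sentencepiece:", sentencepiece.__version__)   # expected: 0.1.91
```

Equivalently, `pip show torch sentencepiece` reports the installed versions from the package metadata.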