Closed XM-WANG closed 4 years ago
Hi~~ This didn't happen before. Please try to: 1) provide me a full running log 2) check whether the script is correctly set for the crf-based model ~ 3) use the newer version: https://github.com/AtmaHou/MetaDialog
Thanks for your reply! The whole log is shown below.
I just tried to run main.py
directly and gave it the necessary args parameters, and I got an error informing me that there is no attribute 'train_trans_mat'. Then I searched for it in utils/opt.py
and found that this parameter actually doesn't exist. I am not sure whether I configured it properly.
Args:
{
"train_path": "/wangxiaoming path/FewShotTagging/ACL2020data/xval_ner_shot_5/ner-train-1-shot-5.json",
"dev_path": "/wangxiaoming path/FewShotTagging/ACL2020data/xval_ner_shot_5/ner-train-1-shot-5.json",
"test_path": null,
"eval_script": "./scripts/conlleval.pl",
"bert_path": "/wangxiaoming path/FewShotTagging/uncased_L-12_H-768_A-12/",
"bert_vocab": "/wangxiaoming path/FewShotTagging/uncased_L-12_H-768_A-12/vocab.txt",
"output_dir": "outputdirtest",
"saved_model_path": "",
"embedding_cache": "/users4/ythou/Projects/Homework/ComputationalSemantic/.word_vectors_cache",
"allow_override": false,
"load_feature": false,
"save_feature": false,
"do_train": true,
"do_predict": false,
"do_debug": false,
"do_overfit_test": false,
"verbose": false,
"seed": 42,
"local_rank": -1,
"no_cuda": false,
"cpt_per_epoch": 2,
"convergence_window": 30000,
"convergence_dev_num": 5,
"train_batch_size": 2,
"learning_rate": 5e-05,
"num_train_epochs": 20,
"warmup_proportion": 0.1,
"eval_when_train": false,
"gradient_accumulation_steps": 1,
"optimize_on_cpu": false,
"fp16": false,
"loss_scale": 128,
"delete_checkpoint": false,
"clip_grad": -1,
"decay_lr": -1,
"decay_epoch_size": 1,
"warmup_epoch": -1,
"upper_lr": -1,
"no_embedder_grad": false,
"fix_embed_epoch": -1,
"train_label_mask": null,
"test_batch_size": 2,
"test_on_cpu": false,
"sim_annotation": "match",
"bio_text_omit": false,
"separate_prj": false,
"projection_layer": "none",
"context_emb": "bert",
"similarity": "dot",
"emb_dim": 64,
"label_reps": "sep",
"use_schema": false,
"decoder": "crf",
"emission": "mnet",
"emission_normalizer": "",
"emission_scaler": null,
"ems_scale_r": 1,
"ple_normalizer": "",
"ple_scaler": null,
"ple_scale_r": 1,
"tap_random_init": false,
"tap_random_init_r": 1,
"tap_mlp": false,
"tap_mlp_out_dim": 768,
"tap_proto": false,
"tap_proto_r": 1,
"proj_dim": 0,
"emb_log": false,
"div_by_tag_num": false,
"transition": "learn",
"trans_normalizer": "",
"trans_scaler": null,
"backoff_init": "rand",
"trans_r": 1,
"trans_scale_r": 1,
"label_trans_normalizer": "",
"label_trans_scaler": "fix",
"label_trans_scale_r": 1,
"mask_transition": false,
"add_transition_rules": false,
"loss_func": "cross_entropy"
}
10/04/2020 20:04:00 - INFO - utils.device_helper - device: cuda n_gpu: 2, distributed training: False, 16-bits training: False
10/04/2020 20:04:00 - INFO - main - Environment: device cuda, n_gpu 2
10/04/2020 20:04:00 - INFO - pytorch_pretrained_bert.tokenization - loading vocabulary file /misc/kfdata01/kf_grp/zzwang/station/FewShotTagging/uncased_L-12_H-768_A-12/vocab.txt
10/04/2020 20:04:00 - INFO - main - Finish train dev prepare dict
10/04/2020 20:04:13 - INFO - main - Finish prepare train dev features
10/04/2020 20:04:13 - INFO - main - Perform training
Traceback (most recent call last):
File "main.py", line 218, in
When you run the crf-based model, please try to set mask_transition == True.
In scripts, you need to set mask_trans=-mk_tr
.
When you run the crf-based model, please try to set mask_transition == True. In scripts, you need to set
mask_trans=-mk_tr
.
Thanks! It works.
When you run the crf-based model, please try to set mask_transition == True. In scripts, you need to set
mask_trans=-mk_tr
.
Thanks!
File "./utils/model_helper.py", line 44, in make_model trans_mat = opt.train_trans_mat AttributeError: 'Namespace' object has no attribute 'train_trans_mat'