start training...
+++++++++++
Epoch: 0
+++++++++++
train ent1s num: 4500 train ent2s num: 4500 for_Candidate_ent1s num: 15000 for_candidate_ent2s num: 15000
D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\_reduction.py:46: UserWarning: size_average and reduce args will be deprecated, please use reduction='mean' instead.
warnings.warn(warning.format(ret))
Traceback (most recent call last):
File "D:/实验室相关的内容/bert-int/bert-int-master/basic_bert_unit/main.py", line 75, in <module>
main()
File "D:/实验室相关的内容/bert-int/bert-int-master/basic_bert_unit/main.py", line 70, in main
train(Model,Criterion,Optimizer,Train_gene,train_ill,test_ill,ent2data)
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\train_func.py", line 115, in train
for_candidate_ent2s,entid2data,Train_gene.index2entity)
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\train_func.py", line 44, in generate_candidate_dict
temp_emb = entlist2emb(Model,train_ent1s[i:i+batch_size],entid2data,CUDA_NUM).cpu().tolist()
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\train_func.py", line 26, in entlist2emb
batch_emb = Model(batch_token_ids,batch_mask_ids)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\Basic_Bert_Unit_model.py", line 20, in forward
x = self.bert_model(input_ids = batch_word_list,attention_mask = attention_mask)#token_type_ids =token_type_ids
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 627, in forward
head_mask=head_mask)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 348, in forward
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 326, in forward
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 283, in forward
self_outputs = self.self(input_tensor, attention_mask, head_mask)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 211, in forward
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
RuntimeError: cublas runtime error : the GPU program failed to execute at C:/w/1/s/windows/pytorch/aten/src/THC/THCBlas.cu:450
start training... +++++++++++ Epoch: 0 +++++++++++ train ent1s num: 4500 train ent2s num: 4500 for_Candidate_ent1s num: 15000 for_candidate_ent2s num: 15000 D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\_reduction.py:46: UserWarning: size_average and reduce args will be deprecated, please use reduction='mean' instead. warnings.warn(warning.format(ret)) Traceback (most recent call last): File "D:/实验室相关的内容/bert-int/bert-int-master/basic_bert_unit/main.py", line 75, in <module>
main()
File "D:/实验室相关的内容/bert-int/bert-int-master/basic_bert_unit/main.py", line 70, in main
train(Model,Criterion,Optimizer,Train_gene,train_ill,test_ill,ent2data)
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\train_func.py", line 115, in train
for_candidate_ent2s,entid2data,Train_gene.index2entity)
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\train_func.py", line 44, in generate_candidate_dict
temp_emb = entlist2emb(Model,train_ent1s[i:i+batch_size],entid2data,CUDA_NUM).cpu().tolist()
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\train_func.py", line 26, in entlist2emb
batch_emb = Model(batch_token_ids,batch_mask_ids)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\实验室相关的内容\bert-int\bert-int-master\basic_bert_unit\Basic_Bert_Unit_model.py", line 20, in forward
x = self.bert_model(input_ids = batch_word_list,attention_mask = attention_mask)#token_type_ids =token_type_ids
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 627, in forward
head_mask=head_mask)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 348, in forward
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 326, in forward
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 283, in forward
self_outputs = self.self(input_tensor, attention_mask, head_mask)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "D:\Anaconda\envs\bert-int3\lib\site-packages\transformers\modeling_bert.py", line 211, in forward
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
RuntimeError: cublas runtime error : the GPU program failed to execute at C:/w/1/s/windows/pytorch/aten/src/THC/THCBlas.cu:450