title
QANet: Combining Local Convolution with Global Self-Attention for Reading Comprehension
notes
A kitchen-sink model with aggressive parameter stacking. It replaces RNNs and other recurrent networks with CNNs plus self-attention, which allows parallelization and speeds up both training and inference. Because it is fast, it can train on more data, so the authors also do data augmentation. It also uses the standard QA trick of bidirectional context-query attention between the two inputs.
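A minimal sketch of the convolution-plus-self-attention idea behind one QANet-style encoder block, written in PyTorch. This is not the authors' code: it omits positional encodings and dropout, the class and variable names are my own, and the hyperparameters (d_model=128, kernel size 7, 4 convolutions, 8 heads) are the paper's reported settings but simplified here.

```python
# Sketch of a QANet-style encoder block: stacked depthwise-separable convolutions
# followed by multi-head self-attention and a feed-forward layer, each wrapped in
# layer norm + residual connection (no recurrence anywhere).
import torch
import torch.nn as nn

class DepthwiseSeparableConv(nn.Module):
    def __init__(self, d_model, kernel_size):
        super().__init__()
        self.depthwise = nn.Conv1d(d_model, d_model, kernel_size,
                                   padding=kernel_size // 2, groups=d_model)
        self.pointwise = nn.Conv1d(d_model, d_model, 1)

    def forward(self, x):                      # x: (batch, seq_len, d_model)
        y = self.depthwise(x.transpose(1, 2))  # convolve over the sequence dimension
        y = self.pointwise(y).transpose(1, 2)
        return torch.relu(y)

class QANetEncoderBlock(nn.Module):
    def __init__(self, d_model=128, kernel_size=7, num_convs=4, num_heads=8):
        super().__init__()
        self.convs = nn.ModuleList(DepthwiseSeparableConv(d_model, kernel_size)
                                   for _ in range(num_convs))
        self.conv_norms = nn.ModuleList(nn.LayerNorm(d_model) for _ in range(num_convs))
        self.attn = nn.MultiheadAttention(d_model, num_heads, batch_first=True)
        self.attn_norm = nn.LayerNorm(d_model)
        self.ffn = nn.Sequential(nn.Linear(d_model, d_model), nn.ReLU(),
                                 nn.Linear(d_model, d_model))
        self.ffn_norm = nn.LayerNorm(d_model)

    def forward(self, x):
        for norm, conv in zip(self.conv_norms, self.convs):
            x = x + conv(norm(x))              # residual around each local convolution
        y = self.attn_norm(x)
        x = x + self.attn(y, y, y)[0]          # global self-attention instead of an RNN
        return x + self.ffn(self.ffn_norm(x))  # position-wise feed-forward
```

For example, `QANetEncoderBlock()(torch.randn(2, 50, 128))` returns a tensor of the same shape; since nothing in the block is recurrent, all positions are processed in parallel.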
bibtex
@inproceedings{
wei2018fast,
title={Fast and Accurate Reading Comprehension by Combining Self-Attention and Convolution},
author={Adams Wei Yu and David Dohan and Quoc Le and Thang Luong and Rui Zhao and Kai Chen},
booktitle={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=B14TlG-RW},
}
link
https://openreview.net/forum?id=B14TlG-RW&noteId=B14TlG-RW
publication
ICLR 2018, accepted
open source
https://github.com/allenai/bi-att-flow
affiliated
Carnegie Mellon University, Google Brain