Motivation: State-of-the-art biomedical named entity recognition (BioNER)
systems often require handcrafted features specific to each entity type, such
as genes, chemicals and diseases. Although recent studies explored using neural
network models for BioNER to free experts from manual feature engineering, the
performance remains limited by the available training data for each entity
type. Results: We propose a multi-task learning framework for BioNER to
collectively use the training data of different types of entities and improve
the performance on each of them. In experiments on 15 benchmark BioNER
datasets, our multi-task model achieves substantially better performance
compared with state-of-the-art BioNER systems and baseline neural sequence
labeling models. Further analysis shows that the large performance gains come
from sharing character- and word-level information among relevant biomedical
entities across differently labeled corpora.
Description
Cross-type Biomedical Named Entity Recognition with Deep Multi-Task Learning
%0 Generic
%1 wang2018crosstype
%A Wang, Xuan
%A Zhang, Yu
%A Ren, Xiang
%A Zhang, Yuhao
%A Zitnik, Marinka
%A Shang, Jingbo
%A Langlotz, Curtis
%A Han, Jiawei
%D 2018
%K NER ba_viola learning med multi-task
%T Cross-type Biomedical Named Entity Recognition with Deep Multi-Task
Learning
%U http://arxiv.org/abs/1801.09851
%X Motivation: State-of-the-art biomedical named entity recognition (BioNER)
systems often require handcrafted features specific to each entity type, such
as genes, chemicals and diseases. Although recent studies explored using neural
network models for BioNER to free experts from manual feature engineering, the
performance remains limited by the available training data for each entity
type. Results: We propose a multi-task learning framework for BioNER to
collectively use the training data of different types of entities and improve
the performance on each of them. In experiments on 15 benchmark BioNER
datasets, our multi-task model achieves substantially better performance
compared with state-of-the-art BioNER systems and baseline neural sequence
labeling models. Further analysis shows that the large performance gains come
from sharing character- and word-level information among relevant biomedical
entities across differently labeled corpora.
% arXiv preprint (published form appeared in Bioinformatics; id taken from the url field).
% Cleaned BibSonomy export: moved the arXiv id out of the garbled note field
% ("cite arxiv:1801.09851Comment: ...") into proper eprint/archiveprefix fields,
% aligned fields, and braced the year for consistency.
@misc{wang2018crosstype,
  author        = {Wang, Xuan and Zhang, Yu and Ren, Xiang and Zhang, Yuhao and Zitnik, Marinka and Shang, Jingbo and Langlotz, Curtis and Han, Jiawei},
  title         = {Cross-type Biomedical Named Entity Recognition with Deep Multi-Task
Learning},
  year          = {2018},
  eprint        = {1801.09851},
  archiveprefix = {arXiv},
  url           = {http://arxiv.org/abs/1801.09851},
  note          = {7 pages, 4 figures},
  abstract      = {Motivation: State-of-the-art biomedical named entity recognition (BioNER)
systems often require handcrafted features specific to each entity type, such
as genes, chemicals and diseases. Although recent studies explored using neural
network models for BioNER to free experts from manual feature engineering, the
performance remains limited by the available training data for each entity
type. Results: We propose a multi-task learning framework for BioNER to
collectively use the training data of different types of entities and improve
the performance on each of them. In experiments on 15 benchmark BioNER
datasets, our multi-task model achieves substantially better performance
compared with state-of-the-art BioNER systems and baseline neural sequence
labeling models. Further analysis shows that the large performance gains come
from sharing character- and word-level information among relevant biomedical
entities across differently labeled corpora.},
  keywords      = {NER ba_viola learning med multi-task},
  description   = {Cross-type Biomedical Named Entity Recognition with Deep Multi-Task Learning},
  added-at      = {2020-06-25T13:16:15.000+0200},
  timestamp     = {2020-06-25T13:16:15.000+0200},
  biburl        = {https://www.bibsonomy.org/bibtex/21be8dae3b00a22ae4c7a2c76d57c5f85/schwemmlein},
  interhash     = {a170412760a3ea288e12e9a479f500ed},
  intrahash     = {1be8dae3b00a22ae4c7a2c76d57c5f85},
}