publichealthsurveillance committed on
Commit
863b4b4
1 Parent(s): 11292ac

Update README.md

Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -32,7 +32,7 @@ For more details, refer to the paper [Benchmarking for Public Health Surveillanc
  @inproceedings{naseem-etal-2022-benchmarking,
      title = "Benchmarking for Public Health Surveillance tasks on Social Media with a Domain-Specific Pretrained Language Model",
      author = "Naseem, Usman and
-        Chan Lee, Byoung and
+        Lee, Byoung Chan and
         Khushi, Matloob and
         Kim, Jinman and
         Dunn, Adam",
@@ -42,6 +42,7 @@ For more details, refer to the paper [Benchmarking for Public Health Surveillanc
      address = "Dublin, Ireland",
      publisher = "Association for Computational Linguistics",
      url = "https://aclanthology.org/2022.nlppower-1.3",
+     doi = "10.18653/v1/2022.nlppower-1.3",
      pages = "22--31",
      abstract = "A user-generated text on social media enables health workers to keep track of information, identify possible outbreaks, forecast disease trends, monitor emergency cases, and ascertain disease awareness and response to official health correspondence. This exchange of health information on social media has been regarded as an attempt to enhance public health surveillance (PHS). Despite its potential, the technology is still in its early stages and is not ready for widespread application. Advancements in pretrained language models (PLMs) have facilitated the development of several domain-specific PLMs and a variety of downstream applications. However, there are no PLMs for social media tasks involving PHS. We present and release PHS-BERT, a transformer-based PLM, to identify tasks related to public health surveillance on social media. We compared and benchmarked the performance of PHS-BERT on 25 datasets from different social medial platforms related to 7 different PHS tasks. Compared with existing PLMs that are mainly evaluated on limited tasks, PHS-BERT achieved state-of-the-art performance on all 25 tested datasets, showing that our PLM is robust and generalizable in the common PHS tasks. By making PHS-BERT available, we aim to facilitate the community to reduce the computational cost and introduce new baselines for future works across various PHS-related tasks.",
  }
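
The cited abstract describes PHS-BERT as a released, transformer-based pretrained language model. A minimal sketch of how one might load it with the Hugging Face `transformers` library follows; the model ID `publichealthsurveillance/PHS-BERT` is assumed from this repository's namespace, and the example sentence is purely illustrative.

```python
# Minimal sketch: load PHS-BERT and extract a sentence embedding.
# The model ID is assumed from this repo's namespace; adjust if the
# hosted checkpoint uses a different name.
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("publichealthsurveillance/PHS-BERT")
model = AutoModel.from_pretrained("publichealthsurveillance/PHS-BERT")

# Encode an illustrative social-media post and take the [CLS] token's
# hidden state, which downstream PHS classifiers would typically
# fine-tune or probe.
inputs = tokenizer("Flu cases are rising in my city this week.", return_tensors="pt")
outputs = model(**inputs)
cls_embedding = outputs.last_hidden_state[:, 0]  # shape: (1, hidden_size)
```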