first commit
This commit is contained in:
parent 3d3af54c3e
commit bbeae6fd9e

README.md (58 lines changed)
@@ -1,3 +1,57 @@
# chinese-roberta-wwm-ext_a13571383564562432345306
---
language:
- zh
tags:
- bert
license: "apache-2.0"
---

hfl Chinese BERT with Whole Word Masking

# Please use 'Bert'-related functions to load this model!
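A minimal loading sketch with the Hugging Face `transformers` library; the model id below is an assumption inferred from the repository name, so substitute the actual id or a local path:

```python
# Minimal sketch, not official usage: load this checkpoint with BERT
# classes, as the note above instructs. "hfl/chinese-roberta-wwm-ext" is an
# assumed model id; point it at the actual repository or a local directory.
from transformers import BertForMaskedLM, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext")
model = BertForMaskedLM.from_pretrained("hfl/chinese-roberta-wwm-ext")
```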

## Chinese BERT with Whole Word Masking

To further accelerate Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
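To illustrate the idea (a hypothetical sketch, not the actual pre-training code): with whole word masking, when any character of a segmented Chinese word is selected for masking, all characters of that word are masked together.

```python
import random

def whole_word_mask(words, mask_prob=0.15, mask_token="[MASK]"):
    """Hypothetical sketch of WWM: the masking unit is a segmented word,
    so all characters of a selected word are masked together."""
    tokens = []
    for word in words:
        if random.random() < mask_prob:
            tokens.extend([mask_token] * len(word))  # mask every character of the word
        else:
            tokens.extend(word)  # keep the word's characters unchanged
    return tokens

# With the hypothetical segmentation ["使用", "语言", "模型"], one possible
# output is ['使', '用', '[MASK]', '[MASK]', '模', '型'].
```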

**[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu

This repository is developed based on: https://github.com/google-research/bert

You may also be interested in:
- Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
- Chinese MacBERT: https://github.com/ymcui/MacBERT
- Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
- Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
- Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer

More resources by HFL: https://github.com/ymcui/HFL-Anthology

## Citation
If you find the technical report or resources useful, please cite the following technical report in your paper.
- Primary: https://arxiv.org/abs/2004.13922
```
@inproceedings{cui-etal-2020-revisiting,
  title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
  author = "Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Wang, Shijin and Hu, Guoping",
  booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
  month = nov,
  year = "2020",
  address = "Online",
  publisher = "Association for Computational Linguistics",
  url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
  pages = "657--668",
}
```
- Secondary: https://arxiv.org/abs/1906.08101
```
@article{chinese-bert-wwm,
  title={Pre-Training with Whole Word Masking for Chinese BERT},
  author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
  journal={arXiv preprint arXiv:1906.08101},
  year={2019}
}
```

@@ -0,0 +1 @@
{}
@@ -0,0 +1,28 @@
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "directionality": "bidi",
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "type_vocab_size": 2,
  "vocab_size": 21128
}
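For reference, these values describe a BERT-base sized model (12 layers, 12 attention heads, hidden size 768) with the 21,128-entry Chinese vocabulary. A minimal sketch of reading such a config with `transformers`, assuming the JSON above is saved locally as `config.json`:

```python
# Minimal sketch: parse the config above with transformers' BertConfig.
# Assumes the JSON has been saved locally as config.json.
from transformers import BertConfig

config = BertConfig.from_json_file("config.json")
print(config.num_hidden_layers, config.num_attention_heads, config.vocab_size)
# -> 12 12 21128
```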

@@ -0,0 +1,10 @@
{
  "framework": "pytorch",
  "task": "fill-mask",
  "model": {
    "type": "fill-mask"
  },
  "pipeline": {
    "type": "fill-mask"
  }
}
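This appears to be a pipeline configuration for the fill-mask task; a minimal usage sketch with the `transformers` pipeline API (the model id is again an assumption):

```python
# Minimal fill-mask sketch matching the "task": "fill-mask" setting above.
# The model id is an assumption; substitute the actual repository id.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="hfl/chinese-roberta-wwm-ext")
print(fill_mask("巴黎是[MASK]国的首都。"))  # scores candidates for the masked character
```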
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
{"init_inputs": []}