first commit

xxl 2024-11-25 09:18:13 +08:00
parent 7dddaf72e2
commit 70511bb088
8 changed files with 50093 additions and 2 deletions

README.md

@@ -1,3 +1,33 @@
# layoutlmv3-large_a13684308344434688439059
---
language: en
license: cc-by-nc-sa-4.0
---
# LayoutLMv3
[Microsoft Document AI](https://www.microsoft.com/en-us/research/project/document-ai/) | [GitHub](https://aka.ms/layoutlmv3)
## Model description
LayoutLMv3 is a pre-trained multimodal Transformer for Document AI with unified text and image masking. The simple unified architecture and training objectives make LayoutLMv3 a general-purpose pre-trained model. For example, LayoutLMv3 can be fine-tuned for both text-centric tasks, including form understanding, receipt understanding, and document visual question answering, and image-centric tasks such as document image classification and document layout analysis.
[LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387)
Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei, Preprint 2022.
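The card itself ships no usage snippet; the sketch below (not part of the original card) shows one way to run the checkpoint through the Hugging Face transformers API, assuming a release that includes the LayoutLMv3 classes and pytesseract for the built-in OCR step. The checkpoint id, image path, and label count are illustrative.
```python
from PIL import Image
from transformers import LayoutLMv3Processor, LayoutLMv3ForSequenceClassification

# The public checkpoint id is assumed here; a local clone of this repo works the same way.
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-large")
model = LayoutLMv3ForSequenceClassification.from_pretrained(
    "microsoft/layoutlmv3-large", num_labels=16  # hypothetical label count, e.g. RVL-CDIP classes
)

image = Image.open("document.png").convert("RGB")  # hypothetical scanned document
# apply_ocr is true in preprocessor_config.json, so words and boxes come from Tesseract.
encoding = processor(image, return_tensors="pt", truncation=True)
outputs = model(**encoding)
print(outputs.logits.shape)  # (1, num_labels)
```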
## Citation
If you find LayoutLM useful in your research, please cite the following paper:
```
@inproceedings{huang2022layoutlmv3,
  author={Yupan Huang and Tengchao Lv and Lei Cui and Yutong Lu and Furu Wei},
  title={LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking},
  booktitle={Proceedings of the 30th ACM International Conference on Multimedia},
  year={2022}
}
```
## License
The content of this project itself is licensed under the [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/).
Portions of the source code are based on the [transformers](https://github.com/huggingface/transformers) project.
[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct)

33
config.json Normal file

@@ -0,0 +1,33 @@
{
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"classifier_dropout": null,
"coordinate_size": 171,
"eos_token_id": 2,
"has_relative_attention_bias": true,
"has_spatial_attention_bias": true,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"input_size": 224,
"intermediate_size": 4096,
"layer_norm_eps": 1e-05,
"max_2d_position_embeddings": 1024,
"max_position_embeddings": 514,
"max_rel_2d_pos": 256,
"max_rel_pos": 128,
"model_type": "layoutlmv3",
"num_attention_heads": 16,
"num_hidden_layers": 24,
"pad_token_id": 1,
"rel_2d_pos_bins": 64,
"rel_pos_bins": 32,
"second_input_size": 112,
"shape_size": 170,
"torch_dtype": "float32",
"transformers_version": "4.12.5",
"type_vocab_size": 1,
"visual_embed": true,
"vocab_size": 50265
}
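These values describe the large variant of the architecture (24 layers, hidden size 1024, 16 heads) with both 1D and 2D relative attention biases enabled. A small sketch of inspecting the file through transformers follows; it assumes a release that ships LayoutLMv3 (the model classes landed after the 4.12.5 recorded in the file) and that config.json sits in the current directory.
```python
from transformers import LayoutLMv3Config

config = LayoutLMv3Config.from_pretrained(".")  # directory containing this config.json

print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)
# 1024 24 16 -> the "large" variant
print(config.has_relative_attention_bias, config.has_spatial_attention_bias)
# True True -> 1D and 2D relative position biases are enabled
print(config.coordinate_size, config.shape_size)
# 171 170 -> embedding widths for box coordinates and for box width/height
```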

50001
merges.txt Normal file

File diff suppressed because it is too large.

19
preprocessor_config.json Normal file

@@ -0,0 +1,19 @@
{
"apply_ocr": true,
"do_normalize": true,
"do_resize": true,
"feature_extractor_type": "LayoutLMv3FeatureExtractor",
"image_mean": [
0.5,
0.5,
0.5
],
"image_std": [
0.5,
0.5,
0.5
],
"ocr_lang": null,
"resample": 2,
"size": 224
}
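A sketch of the image-side preprocessing this file configures, assuming a transformers release with LayoutLMv3 support and pytesseract installed, since apply_ocr is true; resample 2 is PIL's bilinear filter, and the image path is illustrative.
```python
from PIL import Image
from transformers import LayoutLMv3FeatureExtractor

feature_extractor = LayoutLMv3FeatureExtractor.from_pretrained(".")  # reads preprocessor_config.json
image = Image.open("page.png").convert("RGB")  # hypothetical page scan

features = feature_extractor(image, return_tensors="pt")
print(features["pixel_values"].shape)  # (1, 3, 224, 224), normalized with mean 0.5 / std 0.5
print(features["words"][0][:5])        # OCR tokens produced by Tesseract
print(features["boxes"][0][:5])        # their bounding boxes, normalized to a 0-1000 grid
```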

BIN
pytorch_model.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
tf_model.h5 (Stored with Git LFS) Normal file

Binary file not shown.
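The two LFS-tracked weight files target PyTorch and TensorFlow respectively. A hedged sketch of loading either, assuming a transformers build that includes the TensorFlow port of LayoutLMv3 and a local clone of this repo in the current directory:
```python
from transformers import LayoutLMv3Model, TFLayoutLMv3Model

pt_model = LayoutLMv3Model.from_pretrained(".")    # picks up pytorch_model.bin
tf_model = TFLayoutLMv3Model.from_pretrained(".")  # picks up tf_model.h5
```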

1
tokenizer_config.json Normal file

@@ -0,0 +1 @@
{"errors": "replace", "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": true, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "roberta-large", "tokenizer_class": "LayoutLMv3Tokenizer"}

1
vocab.json Normal file

File diff suppressed because one or more lines are too long
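Taken together, the files in this commit are what LayoutLMv3Processor needs: the feature extractor handles pixels and OCR, the tokenizer handles words and boxes. A closing sketch of wiring the two up by hand from a local clone (assumed to be the current directory):
```python
from transformers import (
    LayoutLMv3FeatureExtractor,
    LayoutLMv3Processor,
    LayoutLMv3Tokenizer,
)

feature_extractor = LayoutLMv3FeatureExtractor.from_pretrained(".")  # preprocessor_config.json
tokenizer = LayoutLMv3Tokenizer.from_pretrained(".")  # tokenizer_config.json + vocab.json + merges.txt
processor = LayoutLMv3Processor(feature_extractor, tokenizer)

# One processor call then yields everything the model consumes:
# input_ids, attention_mask, bbox and pixel_values.
```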