first commit

This commit is contained in:
xxl 2025-03-05 16:53:14 +08:00
parent 784b122529
commit fb188e15dd
9 changed files with 151756 additions and 1 deletions

View File

@ -1,3 +1,36 @@
---
license: apache-2.0
datasets:
- BAAI/CCI3-HQ
language:
- en
- zh
---
# ChineseModernBert
ChineseModernBert
<p>
<a href=""><b>ChineseModernBert</b></a> &nbsp;&nbsp; &nbsp;&nbsp; 🖥️ <a href="https://github.com/enze5088/ChineseModernBert">GitHub</a> &nbsp;&nbsp; | &nbsp;&nbsp;🤗 <a href="https://huggingface.co/TurboPascal/ChineseModernBert">Hugging Face</a>&nbsp;&nbsp; | &nbsp;&nbsp; 📑 <a href="https://zhuanlan.zhihu.com/p/26774266896">Blog</a> &nbsp;&nbsp;
<br>
在自然语言处理领域,[BERT](https://huggingface.co/papers/1810.04805)自 2018 年发布以来,凭借其在语言理解和生成任务上的卓越表现,一直被广泛应用。然而,随着时间的推移,无论是中文还是英文的常用 Bert 模型,由于其发布时间距今已有 5 - 6 年,在模型架构和训练数据方面都逐渐显露出陈旧的问题。
近期,**ModernBERT** 于 12 月 19 日左右发布,为自然语言处理领域带来了新的活力。但目前该模型主要面向英文用户,对于中文用户来说,其适用性存在一定的局限性。为了填补这一空白,满足中文自然语言处理任务的需求,笔者精心构建了基于中文语料的 ModernBert 中文版本模型。
ModernBert 是一个专门基于中文预训练语料进行训练的预训练模型。在训练过程中,选用了高质量的 [CCI3-HQ](https://huggingface.co/datasets/BAAI/CCI3-HQ) 数据集进行 1 epoch 的预训练。CCI3-HQ 数据集包含了丰富多样的中文文本,涵盖了新闻资讯、文学作品、学术论文、社交媒体内容等多个领域,这使得 ModernBert 能够学习到全面且深入的中文语言特征和语义信息。
训练代码地址:https://github.com/enze5088/ChineseModernBert
## 训练细节
- **硬件配置**:笔者训练资源有限,本次训练使用了 3\*8\*A100,预训练时间为 58 小时左右。
- **优化器与学习率**:优化器采用 AdamW,初始学习率设置为 1e-4,learning rate scheduler 采用余弦退火。后续可能采用 WSD 等学习率计划重新优化。
- **Tokenizer**:Tokenizer 选用了 Qwen2.5 系列。
- **Batch Size**:单卡 Batch Size 设置为 4,总 Batch Size 为 96。
- **上下文长度**:上下文长度设置为 4096,并采用 packing 等策略。
- **训练策略**:采用了 Packing 等策略。MLM 比率设置为 0.3。
## 开源协议
ModernBert 遵循 Apache 2.0 开源协议发布。这意味着你可以自由地使用、修改和分发该模型,但请务必遵守开源协议中的相关规定,并在使用过程中保留模型的版权声明。

25
added_tokens.json Normal file
View File

@ -0,0 +1,25 @@
{
"</tool_call>": 151658,
"<tool_call>": 151657,
"<|box_end|>": 151649,
"<|box_start|>": 151648,
"<|endoftext|>": 151643,
"<|file_sep|>": 151664,
"<|fim_middle|>": 151660,
"<|fim_pad|>": 151662,
"<|fim_prefix|>": 151659,
"<|fim_suffix|>": 151661,
"<|im_end|>": 151645,
"<|im_start|>": 151644,
"<|image_pad|>": 151655,
"<|object_ref_end|>": 151647,
"<|object_ref_start|>": 151646,
"<|quad_end|>": 151651,
"<|quad_start|>": 151650,
"<|repo_name|>": 151663,
"<|video_pad|>": 151656,
"<|vision_end|>": 151653,
"<|vision_pad|>": 151654,
"<|vision_start|>": 151652,
"[MASK]": 151665
}

46
config.json Normal file
View File

@ -0,0 +1,46 @@
{
"_name_or_path": "ModernChineseBertLarge",
"architectures": [
"ModernBertForMaskedLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 151643,
"classifier_activation": "gelu",
"classifier_bias": false,
"classifier_dropout": 0.0,
"classifier_pooling": "mean",
"cls_token_id": 151643,
"decoder_bias": true,
"deterministic_flash_attn": false,
"embedding_dropout": 0.0,
"eos_token_id": 151645,
"global_attn_every_n_layers": 3,
"global_rope_theta": 160000.0,
"gradient_checkpointing": false,
"hidden_activation": "gelu",
"hidden_size": 1024,
"initializer_cutoff_factor": 2.0,
"initializer_range": 0.02,
"intermediate_size": 2624,
"layer_norm_eps": 1e-05,
"local_attention": 128,
"local_rope_theta": 10000.0,
"max_position_embeddings": 8192,
"mlp_bias": false,
"mlp_dropout": 0.0,
"model_type": "modernbert",
"norm_bias": false,
"norm_eps": 1e-05,
"num_attention_heads": 16,
"num_hidden_layers": 28,
"pad_token_id": 151643,
"reference_compile": true,
"repad_logits_with_grad": false,
"sep_token_id": 151643,
"sparse_pred_ignore_index": -100,
"sparse_prediction": false,
"torch_dtype": "float32",
"transformers_version": "4.49.0",
"vocab_size": 151936
}

151388
merges.txt Normal file

File diff suppressed because it is too large Load Diff

BIN
model.safetensors (Stored with Git LFS) Normal file

Binary file not shown.

38
special_tokens_map.json Normal file
View File

@ -0,0 +1,38 @@
{
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"eos_token": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"mask_token": {
"content": "[MASK]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}

BIN
tokenizer.json (Stored with Git LFS) Normal file

Binary file not shown.

218
tokenizer_config.json Normal file
View File

@ -0,0 +1,218 @@
{
"add_bos_token": false,
"add_prefix_space": false,
"added_tokens_decoder": {
"151643": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151644": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151645": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151646": {
"content": "<|object_ref_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151647": {
"content": "<|object_ref_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151648": {
"content": "<|box_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151649": {
"content": "<|box_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151650": {
"content": "<|quad_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151651": {
"content": "<|quad_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151652": {
"content": "<|vision_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151653": {
"content": "<|vision_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151654": {
"content": "<|vision_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151655": {
"content": "<|image_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151656": {
"content": "<|video_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151657": {
"content": "<tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151658": {
"content": "</tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151659": {
"content": "<|fim_prefix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151660": {
"content": "<|fim_middle|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151661": {
"content": "<|fim_suffix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151662": {
"content": "<|fim_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151663": {
"content": "<|repo_name|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151664": {
"content": "<|file_sep|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151665": {
"content": "[MASK]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
}
},
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"bos_token": null,
"chat_template": "{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
"clean_up_tokenization_spaces": false,
"eos_token": "<|im_end|>",
"errors": "replace",
"extra_special_tokens": {},
"mask_token": "[MASK]",
"model_max_length": 131072,
"pad_token": "<|endoftext|>",
"processor_class": "Qwen2VLProcessor",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}

1
vocab.json Normal file

File diff suppressed because one or more lines are too long