first commit

parent 0ad85b5c4c
commit 62ed5e251a
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 896,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
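The block above is the sentence-transformers pooling configuration: mean pooling over 896-dimensional token embeddings, with prompt tokens included in the pooled span. As a rough illustration (not part of the commit), these flags correspond to constructing the `Pooling` module like this:

```python
from sentence_transformers import models

# Illustrative only: mirrors the flags in the pooling config above
# (mean pooling over the 896-dim token embeddings of the transformer).
pooling = models.Pooling(
    word_embedding_dimension=896,
    pooling_mode_cls_token=False,
    pooling_mode_mean_tokens=True,
    pooling_mode_max_tokens=False,
)
```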
@@ -0,0 +1,5 @@
{
  "<|endoftext|>": 151643,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644
}
@@ -0,0 +1,27 @@
{
  "architectures": [
    "Qwen2Model"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 896,
  "initializer_range": 0.02,
  "intermediate_size": 4864,
  "max_position_embeddings": 131072,
  "max_window_layers": 24,
  "model_type": "qwen2",
  "num_attention_heads": 14,
  "num_hidden_layers": 24,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 131072,
  "tie_word_embeddings": true,
  "torch_dtype": "float32",
  "transformers_version": "4.39.2",
  "use_cache": false,
  "use_sliding_window": false,
  "vocab_size": 151936
}
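This is the Qwen2 backbone config: a small decoder trunk (`Qwen2Model`, no LM head) with 24 layers, hidden size 896, grouped-query attention (14 query heads sharing 2 KV heads), and RoPE with theta 1e6 for long contexts. A hedged sketch of rebuilding the same architecture with `transformers` (values copied from the model config above, everything else left at defaults):

```python
from transformers import Qwen2Config, Qwen2Model

# Values copied from the model config above; unset fields keep Qwen2Config defaults.
config = Qwen2Config(
    vocab_size=151936,
    hidden_size=896,
    intermediate_size=4864,
    num_hidden_layers=24,
    num_attention_heads=14,
    num_key_value_heads=2,          # grouped-query attention
    max_position_embeddings=131072,
    rope_theta=1000000.0,
    rms_norm_eps=1e-6,
    tie_word_embeddings=True,
    use_cache=False,
)
model = Qwen2Model(config)  # bare transformer trunk, as listed in "architectures"
```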
@@ -0,0 +1,12 @@
{
  "__version__": {
    "sentence_transformers": "2.7.0",
    "transformers": "4.39.2",
    "pytorch": "2.1.0+cpu"
  },
  "prompts": {
    "query": "",
    "document": ""
  },
  "default_prompt_name": null
}
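The sentence-transformers config pins the library versions and declares two named prompts, "query" and "document", both empty. Callers can still select them by name at encode time; with empty strings the selection is effectively a no-op, but the hook is there if prompts are added later. A usage sketch (the repo path is a placeholder, not something stated in the commit):

```python
from sentence_transformers import SentenceTransformer

# Placeholder path; trust_remote_code is assumed because of the custom tokenizer shipped below.
model = SentenceTransformer("path/to/this/repo", trust_remote_code=True)

# "query" / "document" resolve to the prompts declared above (both empty, so no prefix is added).
q = model.encode(["what is mean pooling?"], prompt_name="query")
d = model.encode(["Mean pooling averages token embeddings."], prompt_name="document")
```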
File diff suppressed because it is too large
Binary file not shown.
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
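This modules list is the pipeline recipe SentenceTransformer replays at load time: a Transformer encoder, then the pooling module stored under 1_Pooling, then L2 normalization so cosine similarity reduces to a dot product. A hand-built equivalent might look like the following sketch (paths are placeholders):

```python
from sentence_transformers import SentenceTransformer, models

# Stage 0: the Qwen2 encoder (placeholder path; max_seq_length matches the config below).
transformer = models.Transformer("path/to/this/repo", max_seq_length=32768)
# Stage 1: mean pooling, matching the 1_Pooling config above.
pooling = models.Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
# Stage 2: L2-normalize the sentence embedding.
normalize = models.Normalize()

model = SentenceTransformer(modules=[transformer, pooling, normalize])
```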
@@ -0,0 +1,4 @@
{
  "max_seq_length": 32768,
  "do_lower_case": false
}
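The 4-line config above caps inputs at 32768 tokens and disables lowercasing. When the model is loaded through SentenceTransformer this surfaces as `max_seq_length`, which can be lowered at runtime; a brief sketch (placeholder path again):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("path/to/this/repo", trust_remote_code=True)  # placeholder path
print(model.max_seq_length)   # 32768, taken from the config above
model.max_seq_length = 8192   # optional: shorter cap to cut memory on long batches
```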
@@ -0,0 +1,20 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
@@ -0,0 +1,267 @@
from typing import List, Optional

from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer as OriginalQwen2Tokenizer
from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast as OriginalQwen2TokenizerFast
from tokenizers import processors

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_file": "tokenizer.json",
}

class Qwen2Tokenizer(OriginalQwen2Tokenizer):
    """
    Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.

    As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
    be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:

    ```python
    >>> from transformers import Qwen2Tokenizer

    >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
    >>> tokenizer("Hello world")["input_ids"]
    [9707, 1879]

    >>> tokenizer(" Hello world")["input_ids"]
    [21927, 1879]
    ```
    This is expected.

    You should not use GPT2Tokenizer instead, because of the different pretokenization rules.

    This tokenizer inherits from [`PreTrainedTokenizer`], which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to this
            token instead.
        bos_token (`str`, *optional*):
            The beginning-of-sequence token. Not applicable to this tokenizer.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end-of-sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not the model should clean up the spaces that were added when splitting the input text during
            the tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
        split_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the special tokens should be split during the tokenization process. The default behavior is
            to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then
            `tokenizer.tokenize("<|endoftext|>")` gives `['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`,
            then `tokenizer.tokenize("<|endoftext|>")` will give `['<', '|', 'endo', 'ft', 'ext', '|', '>']`. This
            argument is only supported for `slow` tokenizers for the moment.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
    """

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        clean_up_tokenization_spaces=False,
        split_special_tokens=False,
        add_eos_token=False,
        **kwargs,
    ):
        # The add_eos_token handling was inspired by the LlamaTokenizer.
        self.add_eos_token = add_eos_token

        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            split_special_tokens=split_special_tokens,
            add_eos_token=add_eos_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Optionally append <eos> after each sequence (and after the second sequence of a pair).
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + eos_token_id
        return (
            ([0] * len(token_ids_0))
            + eos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed, to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, only the first portion of the mask (0s) is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(token_ids_1 + eos_token_id)

        return output

class Qwen2TokenizerFast(OriginalQwen2TokenizerFast):
    """
    Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
    be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:

    ```python
    >>> from transformers import Qwen2TokenizerFast

    >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
    >>> tokenizer("Hello world")["input_ids"]
    [9707, 1879]

    >>> tokenizer(" Hello world")["input_ids"]
    [21927, 1879]
    ```
    This is expected.

    This tokenizer inherits from [`PreTrainedTokenizerFast`], which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            Path to a [tokenizers](https://github.com/huggingface/tokenizers) file (generally with a .json extension)
            that contains everything needed to load the tokenizer.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to this
            token instead. Not applicable to this tokenizer.
        bos_token (`str`, *optional*):
            The beginning-of-sequence token. Not applicable to this tokenizer.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end-of-sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
    """

    slow_tokenizer_class = Qwen2Tokenizer
    padding_side = "left"

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        add_eos_token=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        self._add_eos_token = add_eos_token
        self.update_post_processor()

    def update_post_processor(self):
        """
        Updates the underlying post processor with the current `eos_token`.
        """
        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError("add_eos_token = True but eos_token = None")

        # Template strings for the TemplateProcessing post-processor,
        # e.g. "$A:0 <|endoftext|>:0" when add_eos_token is enabled.
        single = f"$A:0{(' ' + eos + ':0') if self.add_eos_token else ''}"
        pair = f"{single} $B:1{(' ' + eos + ':1') if self.add_eos_token else ''}"

        special_tokens = []
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=single, pair=pair, special_tokens=special_tokens
        )

    @property
    def add_eos_token(self):
        return self._add_eos_token
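The whole point of these subclasses is the `add_eos_token` switch: on the fast tokenizer it rewires the post-processor so `<|endoftext|>` is appended to every encoded sequence (useful when pooling or supervision depends on a terminal EOS token). A usage sketch, assuming this module is saved as tokenization_qwen.py next to a tokenizer.json, as the auto_map in the tokenizer config further down suggests:

```python
# Assumption: this file lives as tokenization_qwen.py alongside tokenizer.json.
from tokenization_qwen import Qwen2TokenizerFast

tok = Qwen2TokenizerFast(tokenizer_file="tokenizer.json", add_eos_token=True)

ids = tok("Hello world")["input_ids"]
assert ids[-1] == tok.eos_token_id  # <|endoftext|> appended by the TemplateProcessing post-processor
```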
File diff suppressed because it is too large
@@ -0,0 +1,56 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_qwen.Qwen2Tokenizer",
      "tokenization_qwen.Qwen2TokenizerFast"
    ]
  },
  "bos_token": null,
  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "max_length": 512,
  "model_max_length": 32768,
  "pad_to_multiple_of": null,
  "pad_token": "<|endoftext|>",
  "pad_token_type_id": 0,
  "padding_side": "left",
  "split_special_tokens": false,
  "stride": 0,
  "tokenizer_class": "Qwen2Tokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": null
}
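The tokenizer config also carries the standard Qwen im_start/im_end chat template and left padding. For an embedding model the template is rarely exercised, but since `auto_map` routes `AutoTokenizer` to the custom classes above, it can be rendered as usual; a sketch with a placeholder path:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo", trust_remote_code=True)  # placeholder path

messages = [{"role": "user", "content": "Hi"}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n"
```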
File diff suppressed because one or more lines are too long