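# Tokenizer for the ChatGLM4 / CodeGeeX4 family: a tiktoken BPE encoding wrapped in
# the Hugging Face PreTrainedTokenizer interface, plus helpers that build the chat,
# function-calling, and code-infilling prompt formats.
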
import base64
import json
import os
from typing import List, Optional, Union, Dict, Any

import regex as re
import tiktoken
from torch import TensorType
from transformers import PreTrainedTokenizer
from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
from transformers.utils import PaddingStrategy


class ChatGLM4Tokenizer(PreTrainedTokenizer):
    vocab_files_names = {"vocab_file": "tokenizer.model"}
    model_input_names = ["input_ids", "attention_mask", "position_ids"]

    def __init__(
            self,
            vocab_file,
            padding_side="left",
            clean_up_tokenization_spaces=False,
            encode_special_tokens=False,
            **kwargs
    ):
        self.name = "GLM4Tokenizer"
        self.vocab_file = vocab_file
        pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
        self.pat_str = re.compile(pat_str)
        self.encode_special_tokens = encode_special_tokens

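        # tokenizer.model stores one "<base64-encoded token> <rank>" pair per line;
        # decode the tokens back to raw bytes for tiktoken's mergeable ranks.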
        mergeable_ranks = {}
        with open(vocab_file) as f:
            for line in f:
                token, rank = line.strip().split()
                rank = int(rank)
                token = base64.b64decode(token)
                mergeable_ranks[token] = rank

        self.mergeable_ranks = mergeable_ranks

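        # Special tokens (e.g. [gMASK], <sop>, <|user|>) are registered through the
        # `added_tokens_decoder` mapping that transformers passes in via **kwargs.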
        self.tokenizer = tiktoken.Encoding(
            name="my_tokenizer",
            pat_str=pat_str,
            mergeable_ranks=mergeable_ranks,
            special_tokens={v.content: int(k) for k, v in kwargs['added_tokens_decoder'].items()}
        )
        self.decoder = {rank: token for token, rank in mergeable_ranks.items()}
        self.n_words = len(self.decoder)

        super().__init__(
            padding_side=padding_side,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs
        )

    @property
    def vocab_size(self):
        return self.n_words

    def get_vocab(self):
        """Returns vocab as a dict."""
        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

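    # A decoded sequence can mix raw-byte tokens (from the BPE vocab) with str tokens
    # (added special tokens), so string conversion buffers bytes until they can be
    # decoded as complete UTF-8 sequences.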
    @staticmethod
    def convert_tokens_to_string(tokens: List[Union[bytes, str]]) -> str:
        """
        Converts a sequence of tokens into a single string.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors="replace")
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                raise TypeError("token should only be of type bytes or str")
        if temp:
            text += temp.decode("utf-8", errors="replace")
        return text

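    # tiktoken encodes text straight to ids, so _tokenize maps the ids back to their
    # byte-level tokens to satisfy the PreTrainedTokenizer contract.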
    def _tokenize(self, text, **kwargs):
        tokens = []
        ids = self.tokenizer.encode(text)
        for t in ids:
            tokens.append(self.decoder[t])
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token into an id using the vocab."""
        return self.mergeable_ranks[token]

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token using the vocab."""
        return self.decoder.get(index, "")

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the names of the saved files.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, self.vocab_files_names["vocab_file"]
            )
        else:
            vocab_file = save_directory

        with open(self.vocab_file, 'rb') as fin:
            proto_str = fin.read()

        with open(vocab_file, "wb") as writer:
            writer.write(proto_str)

        return (vocab_file,)

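    # Every GLM4 input starts with the [gMASK]<sop> prefix that the model expects
    # for generation.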
    def get_prefix_tokens(self):
        prefix_tokens = [self.convert_tokens_to_ids("[gMASK]"), self.convert_tokens_to_ids("<sop>")]
        return prefix_tokens

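    # Overrides the Jinja-template-based implementation in transformers with the
    # hard-coded GLM4 chat format built by handle_single_conversation below.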
    def apply_chat_template(
            self,
            conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]]],
            add_generation_prompt: bool = False,
            tokenize: bool = True,
            padding: bool = False,
            truncation: bool = False,
            max_length: Optional[int] = None,
            return_tensors: Optional[Union[str, TensorType]] = None,
            return_dict: bool = False,
            tokenizer_kwargs: Optional[Dict[str, Any]] = None,
            add_special_tokens: bool = True,
            **kwargs,
    ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:

        if return_dict and not tokenize:
            raise ValueError(
                "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
                "of tokenizer outputs to return."
            )

        def handle_single_conversation(messages):
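            # Default CodeGeeX system prompt, kept in Chinese as shipped. Gloss: "You are
            # an intelligent programming assistant named CodeGeeX. You answer any question
            # about programming, code, and computers, and provide well-formatted,
            # executable, accurate, and safe code, with detailed explanations when needed."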
            content = "你是一位智能编程助手,你叫CodeGeeX。你会为用户回答关于编程、代码、计算机方面的任何问题,并提供格式规范、可以执行、准确安全的代码,并在必要时提供详细的解释。"
            input_message = self.build_single_message("system", "", content)
            for item in messages:
                role = item.get("role", "")
                if not role:
                    raise ValueError("Invalid conversation format, 'role' must be given")
                # function call
                elif role == "tool":
                    content = self.build_function_sys_prompt(item["content"])
                    input_message = self.build_single_message("system", "", content)
                # chat
                elif role == "system":
                    input_message = self.build_single_message("system", item.get("metadata", ""), item["content"])
                else:
                    input_message += self.build_single_message(item["role"], item.get("metadata", ""), item["content"])

            if add_generation_prompt:
                input_message += "<|assistant|>\n"
            if tokenize:
                input_ids = self.get_prefix_tokens() if add_special_tokens else []
                input_ids += self.tokenizer.encode(input_message, allowed_special='all', disallowed_special=set())
                return input_ids
            else:
                return input_message

        # Main logic to handle different conversation formats
        if isinstance(conversation, list) and all(isinstance(i, dict) for i in conversation):
            result = handle_single_conversation(conversation)
        elif isinstance(conversation, list) and all(isinstance(i, list) for i in conversation):
            result = [handle_single_conversation(c) for c in conversation]
        elif hasattr(conversation, "messages"):
            result = handle_single_conversation(conversation.messages)
        else:
            raise ValueError("Invalid conversation format")

        if tokenize:
            output = self.batch_encode_plus(
                [result] if isinstance(result[0], int) else result,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                is_split_into_words=True,
                add_special_tokens=False
            )
            if return_dict:
                return output
            else:
                return output["input_ids"]
        else:
            return result

    def build_inputs_with_special_tokens(
            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A GLM4 sequence has the following format:

        - single sequence: `[gMASK] <sop> A`
        - pair of sequences: `[gMASK] <sop> A B <eos>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        prefix_tokens = self.get_prefix_tokens()
        token_ids_0 = prefix_tokens + token_ids_0
        if token_ids_1 is not None:
            token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids("<eos>")]
        return token_ids_0

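    # Decoder-only generation wants left padding so the prompt sits immediately
    # before the generated tokens; _pad enforces this and also left-pads
    # attention_mask and position_ids to match.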
    def _pad(
            self,
            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
            max_length: Optional[int] = None,
            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
            pad_to_multiple_of: Optional[int] = None,
            return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding side is defined in self.padding_side:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        assert self.padding_side == "left"

        required_input = encoded_inputs[self.model_input_names[0]]
        seq_length = len(required_input)

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * seq_length

        if "position_ids" not in encoded_inputs:
            encoded_inputs["position_ids"] = list(range(seq_length))

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if "attention_mask" in encoded_inputs:
                encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
            if "position_ids" in encoded_inputs:
                encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input

        return encoded_inputs

    @staticmethod
    def build_single_message(role, metadata, message):
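        # Renders one chat turn, e.g. ("user", "", "hi") -> "<|user|>\nhi".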
        assert role in ["system", "user", "assistant", "observation"], role
        return f"<|{role}|>{metadata}\n{message}"

    @staticmethod
    def build_function_sys_prompt(item: dict) -> str:
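        # The prompt below is kept in Chinese, as shipped. Gloss: it instructs the model
        # to pick the best-matching function for the user's request from the interface
        # information provided, and to reply only with a JSON string containing the
        # function "name" and an "arguments" dict; if none of the listed functions
        # suffice, it should tell the user so conversationally and explain why.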
        prompt = """
你将接收到一个用户提出的问题，并请撰写清晰、简洁且准确的答案。

# Note
- 我将给你提供一些函数工具的接口信息，包括函数的定义、用途、名字、参数名和参数类型。
- 请根据这些信息，为用户的指令，从中选择最合适的函数，并给出调用时需要使用的参数。
- **返回类型为一个json格式的字符串，包含函数名和参数字典。**
    - name: 函数名
    - arguments: 参数字典，其中key为参数名，value为参数类型。
- **只需要生成答案即可，无需在你的回答之前或之后做出解释，也不要直接回答用户的问题。**
- 只有当提供的函数工具不足以完成任务时，请你用正常的语气告知用户并解释原因。

# Functions
以下是可使用的函数工具的接口信息。
""".lstrip()

        # A single function spec and a list of specs render identically.
        functions = item['function'] if isinstance(item['function'], list) else [item['function']]
        for idx, func in enumerate(functions):
            prompt += f"\n## Function {idx + 1}\n"
            prompt += f"\n### Name\n{func['name']}\n"
            prompt += f"\n### Description\n{func['description']}\n"
            prompt += f"\n### Parameters\n```json\n{json.dumps(func['parameters'], ensure_ascii=False)}\n```\n"
        return prompt

    def apply_infilling_template(
            self,
            message: dict,
            add_generation_prompt: bool = False,
            tokenize: bool = True,
            padding: bool = False,
            truncation: bool = False,
            max_length: Optional[int] = None,
            return_tensors: Optional[Union[str, TensorType]] = None,
            return_dict: bool = False,
            add_special_tokens: bool = True,
    ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
        if return_dict and not tokenize:
            raise ValueError(
                "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
                "of tokenizer outputs to return."
            )

        if not isinstance(message, dict):
            raise ValueError("Invalid message format, a dict with 'prefix' and 'suffix' is expected")
        content = self.build_infilling_prompt(message)
        input_message = self.build_single_message("user", "", content)
        if add_generation_prompt:
            input_message += "<|assistant|>\n"
        if not tokenize:
            return input_message

        input_ids = self.get_prefix_tokens() if add_special_tokens else []
        input_ids += self.tokenizer.encode(input_message, allowed_special='all', disallowed_special=set())
        output = self.batch_encode_plus(
            [input_ids] if isinstance(input_ids[0], int) else input_ids,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_tensors=return_tensors,
            is_split_into_words=True,
            add_special_tokens=False
        )
        if return_dict:
            return output
        else:
            return output["input_ids"]

    @staticmethod
    def build_infilling_prompt(item: dict) -> str:
        prompt = ""
        if "path" in item:
            prompt += f"###PATH:{item['path']}\n"
        if "language" in item:
            prompt += f"###LANGUAGE:{item['language']}\n"
        elif "lang" in item:
            prompt += f"###LANGUAGE:{item['lang']}\n"
        if "mode" in item and item['mode'].lower() == "line":
            prompt += "###MODE:LINE\n"
        else:
            prompt += "###MODE:BLOCK\n"
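        # Fill-in-the-middle layout: the suffix comes first, then the prefix, then the
        # <|code_middle|> marker where the model generates the missing span.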
        prompt += f"<|code_suffix|>{item['suffix']}"
        prompt += f"<|code_prefix|>{item['prefix']}"
        prompt += "<|code_middle|>"
        return prompt
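

# Minimal usage sketch (an assumption about deployment, not part of this file): this
# tokenizer is normally loaded through AutoTokenizer from a model repo that ships this
# file together with tokenizer.model and a tokenizer_config.json registering the
# special tokens:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("<model-repo>", trust_remote_code=True)
#     ids = tokenizer.apply_chat_template(
#         [{"role": "user", "content": "Write a quicksort function in Python."}],
#         add_generation_prompt=True,
#     )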