from transformers import LlamaTokenizerFast
class LlamaTokenizerWrapper(LlamaTokenizerFast):
    """Llama fast tokenizer extended with multimodal special-token markers.

    Defines paired start/end marker strings for images, references, boxes,
    quads, points, slices, and audio, overrides the EOS/PAD token strings,
    and exposes convenience properties that resolve each marker to its
    token id by encoding the marker string.
    """

    def __init__(self, **kwargs):
        """Initialize the base fast tokenizer and register marker strings.

        Args:
            **kwargs: forwarded unchanged to ``LlamaTokenizerFast``.
        """
        super().__init__(**kwargs)

        # Paired delimiter strings for multimodal segments.
        # All markers follow the "<|name|>" convention.
        self.im_start = "<|image_start|>"
        self.im_end = "<|image_end|>"
        # NOTE(review): original read "<|ref_start>|" — the closing "|>" was
        # transposed, breaking the "<|...|>" convention every other marker
        # follows. Fixed here; confirm the trained vocabulary contains the
        # corrected form, not the malformed one.
        self.ref_start = "<|ref_start|>"
        self.ref_end = "<|ref_end|>"
        self.box_start = "<|box_start|>"
        self.box_end = "<|box_end|>"
        # NOTE(review): original read "<|quad_start>" / "<|quad_end>" (missing
        # "|" before ">"); fixed to match the convention — confirm against the
        # trained vocabulary.
        self.quad_start = "<|quad_start|>"
        self.quad_end = "<|quad_end|>"
        self.point_start = "<|point_start|>"
        self.point_end = "<|point_end|>"
        self.slice_start = "<|slice_start|>"
        self.slice_end = "<|slice_end|>"
        self.audio_start = "<|audio_start|>"
        self.audio_end = "<|audio_end|>"

        # Override the base tokenizer's EOS and PAD token strings.
        self.eos_token = "<|turn_end|>"
        self.pad_token = "<|pad|>"

    @property
    def eos_id(self):
        """Alias for ``eos_token_id``."""
        return self.eos_token_id

    @property
    def unk_id(self):
        """Alias for ``unk_token_id``."""
        return self.unk_token_id

    @property
    def im_start_id(self):
        """Token id of the image-start marker."""
        return self.encode(self.im_start, add_special_tokens=False)[0]

    @property
    def im_end_id(self):
        """Token id of the image-end marker."""
        return self.encode(self.im_end, add_special_tokens=False)[0]

    @property
    def slice_start_id(self):
        """Token id of the slice-start marker."""
        return self.encode(self.slice_start, add_special_tokens=False)[0]

    @property
    def slice_end_id(self):
        """Token id of the slice-end marker."""
        return self.encode(self.slice_end, add_special_tokens=False)[0]

    @property
    def audio_start_id(self):
        """Token id of the audio-start marker."""
        return self.encode(self.audio_start, add_special_tokens=False)[0]

    @property
    def audio_end_id(self):
        """Token id of the audio-end marker."""
        return self.encode(self.audio_end, add_special_tokens=False)[0]

    @property
    def eos_token_id(self):
        """Token id of the EOS marker set in ``__init__`` ("<|turn_end|>")."""
        return self.encode(self.eos_token, add_special_tokens=False)[0]

    @property
    def pad_token_id(self):
        """Token id of the PAD marker set in ``__init__`` ("<|pad|>").

        BUG FIX: the original encoded ``self.eos_token`` here, so PAD and
        EOS always resolved to the same id even though ``__init__``
        configures a distinct "<|pad|>" token.
        """
        return self.encode(self.pad_token, add_special_tokens=False)[0]