first commit

Commit: 8daf9bcd00
Parent: 559b22fdc7
Author: yang yingjie
Date:   2025-01-21 13:55:11 +08:00

6 changed files with 8330 additions and 0 deletions

config.json (2164 lines) Normal file

File diff suppressed because it is too large.

preprocessor_config.json (17 lines) Normal file

@@ -0,0 +1,17 @@
{
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "ViTFeatureExtractor",
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "resample": 2,
  "size": 224
}
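
For reference, a minimal sketch of how these preprocessing settings are typically consumed with the transformers library; the local path "./" and the sample image name are assumptions, not part of this commit.

# Minimal sketch (not part of the commit) of how preprocessor_config.json is consumed.
# The local path "./" and "sample.png" are placeholder assumptions.
from PIL import Image
from transformers import ViTFeatureExtractor

feature_extractor = ViTFeatureExtractor.from_pretrained("./")  # reads preprocessor_config.json
image = Image.open("sample.png").convert("RGB")

# do_resize: scales to size=224 using resample=2 (PIL bilinear interpolation);
# do_normalize: applies the per-channel image_mean / image_std of 0.5.
inputs = feature_extractor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])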

pytorch_model.bin (Stored with Git LFS) Normal file

Binary file not shown.

special_tokens_map.json (1 line) Normal file

@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}

tokenizer_config.json (1 line) Normal file

@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "do_lower_case": false, "do_word_tokenize": true, "do_subword_tokenize": true, "word_tokenizer_type": "mecab", "subword_tokenizer_type": "character", "never_split": null, "mecab_kwargs": {"mecab_dic": "unidic_lite"}, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "cl-tohoku/bert-base-japanese-char-v2", "tokenizer_class": "BertJapaneseTokenizer"}

vocab.txt (6144 lines) Normal file

File diff suppressed because it is too large.
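
Taken together, the six files resemble a complete checkpoint pairing a ViT image encoder with a Japanese character-level decoder tokenizer. Because the config.json diff is suppressed above, the exact model class is an assumption; the sketch below assumes a TrOCR-style VisionEncoderDecoderModel, the usual pairing of a ViT feature extractor with a text decoder, and uses placeholder paths.

# Hedged end-to-end sketch: assumes the suppressed config.json describes a
# VisionEncoderDecoderModel (TrOCR-style). All paths are placeholder assumptions.
import torch
from PIL import Image
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel

model = VisionEncoderDecoderModel.from_pretrained("./")        # config.json + pytorch_model.bin
feature_extractor = ViTFeatureExtractor.from_pretrained("./")  # preprocessor_config.json
tokenizer = AutoTokenizer.from_pretrained("./")                # tokenizer config, vocab, special tokens

image = Image.open("sample.png").convert("RGB")
pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    generated_ids = model.generate(pixel_values, max_length=64)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])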