first commit
parent df22d41f6d
commit 0efb63f941
Files added in this commit:
AyaVision8BWinRates(AyaVisionBench).png
AyaVision8BWinRates(m-WildVision).png
Aya_Vision_8B_Combined_Win_Rates.png
Aya_Vision_8BvsPangea(AyaVisionBench).png
EfficiencyvsPerformance.png
Vision_Text_Performance.png
aya-vision-8B.png
chat_template.json
config.json
configuration.json
generation_config.json
model-00001-of-00004.safetensors
model-00002-of-00004.safetensors
model-00003-of-00004.safetensors
model-00004-of-00004.safetensors
model.safetensors.index.json
preprocessor_config.json
processor_config.json
special_tokens_map.json
tokenizer.json
tokenizer_config.json
Binary image files added (previews not shown):
AyaVision8BWinRates(AyaVisionBench).png (145 KiB)
AyaVision8BWinRates(m-WildVision).png (148 KiB)
Aya_Vision_8B_Combined_Win_Rates.png (285 KiB)
Aya_Vision_8BvsPangea(AyaVisionBench).png (150 KiB)
EfficiencyvsPerformance.png (190 KiB)
Vision_Text_Performance.png (159 KiB)
aya-vision-8B.png (410 KiB)
chat_template.json
@@ -0,0 +1,3 @@
{
"chat_template": "{{ bos_token }}<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes. When analyzing images, carefully describe and interpret their content while avoiding any promotion of harm, misinformation, or bias.\n\nYou are Aya Vision, a vision-language model built by Cohere for AI. You have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew. You are capable of interpreting images, including describing them, answering questions about their contents, extracting textual information, and analyzing visual context. Your responses must maintain the highest standards of quality, accuracy, and safety.\n\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Aya Vision.\n- You are a large language model built by Cohere for AI.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. 
Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages -%}\n <|START_OF_TURN_TOKEN|>{{ message.role | replace(\"user\", \"<|USER_TOKEN|>\") | replace(\"assistant\", \"<|CHATBOT_TOKEN|><|START_RESPONSE|>\") | replace(\"system\", \"<|SYSTEM_TOKEN|>\") }}\n {%- if message.content is defined -%}\n {%- if message.content is string -%}\n{{ message.content }}\n {%- else -%}\n {%- for item in message.content | selectattr('type', 'equalto', 'image') -%}\n<image>\n {%- endfor -%}\n {%- for item in message.content | selectattr('type', 'equalto', 'text') -%}\n{{ item.text }}\n {%- endfor -%}\n {%- endif -%}\n {%- elif message.message is defined -%}\n {%- if message.message is string -%}\n{{ message.message }}\n {%- else -%}\n {%- for item in message.message | selectattr('type', 'equalto', 'image') -%}\n<image>\n {%- endfor -%}\n {%- for item in message.message | selectattr('type', 'equalto', 'text') -%}\n{{ item.text }}\n {%- endfor -%}\n {%- endif -%}\n {%- endif -%}\n {%- if message.role == \"assistant\" -%}\n<|END_RESPONSE|>\n {%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\n{%- endif -%}\n"
}
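The template above is what a recent transformers release renders when this repository is loaded through its processor (chat_template.json is picked up by AutoProcessor). A minimal rendering sketch; the local path is a placeholder for a checkout of this repo, not something the commit defines:

```python
# Sketch: render the chat template shipped in chat_template.json.
# "./aya-vision-8b" is a placeholder path, not part of this commit.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("./aya-vision-8b")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},  # the template emits one <image> marker per image item
            {"type": "text", "text": "Describe this picture."},
        ],
    }
]

# tokenize=False returns the rendered prompt string, which makes the system
# preamble, turn tokens, and <image> placeholder inserted by the template visible.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
```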
config.json
@@ -0,0 +1,41 @@
{
  "_name_or_path": "aya-vision-8b",
  "adapter_layer_norm_eps": 1e-06,
  "alignment_activation_fn": "swiglu",
  "alignment_intermediate_size": 28672,
  "architectures": [
    "AyaVisionForConditionalGeneration"
  ],
  "downsample_factor": 2,
  "image_token_index": 255036,
  "max_splits_per_img": 12,
  "model_type": "aya_vision",
  "projector_hidden_act": "gelu",
  "text_config": {
    "hidden_size": 4096,
    "intermediate_size": 14336,
    "logit_scale": 0.25,
    "model_type": "cohere2",
    "num_attention_heads": 32,
    "num_hidden_layers": 32,
    "num_key_value_heads": 8,
    "rope_theta": 50000,
    "torch_dtype": "float16",
    "use_qk_norm": false
  },
  "torch_dtype": "float16",
  "transformers_version": "4.50.0.dev0",
  "vision_config": {
    "hidden_size": 1152,
    "image_size": 364,
    "intermediate_size": 4304,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 27,
    "patch_size": 14,
    "torch_dtype": "float16",
    "vision_use_head": false
  },
  "vision_feature_layer": -1,
  "vision_feature_select_strategy": "full"
}
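Read together, this config describes a 27-layer SigLIP vision tower (364-pixel tiles, 14-pixel patches) feeding a 32-layer Cohere2 decoder through a projector that downsamples the patch grid by downsample_factor. Assuming the factor halves each spatial dimension (an assumption about the projector, not something the commit states), the per-tile image token count works out as follows:

```python
# Illustrative arithmetic from the values in config.json above.
# Assumption: downsample_factor shrinks each spatial dimension of the patch grid.
image_size = 364          # vision_config.image_size
patch_size = 14           # vision_config.patch_size
downsample_factor = 2     # top-level downsample_factor

patches_per_side = image_size // patch_size               # 26
tokens_per_side = patches_per_side // downsample_factor   # 13
tokens_per_tile = tokens_per_side ** 2                    # 169
print(tokens_per_tile)    # 169 image tokens per tile; max_splits_per_img caps tiles at 12
```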
configuration.json
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "image-text-to-text", "allow_remote": true}
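configuration.json tags the checkpoint for the image-text-to-text pipeline task. Assuming a transformers version that ships this pipeline, the repository can be driven through the high-level API; the model path and image URL below are placeholders:

```python
# Sketch: drive the checkpoint through the task declared in configuration.json.
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="./aya-vision-8b")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/photo.jpg"},  # placeholder image
            {"type": "text", "text": "What is shown here?"},
        ],
    }
]
print(pipe(text=messages, max_new_tokens=128))
```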
generation_config.json
@@ -0,0 +1,8 @@
{
  "_from_model_config": true,
  "bos_token_id": 5,
  "cache_implementation": "hybrid",
  "eos_token_id": 255001,
  "pad_token_id": 0,
  "transformers_version": "4.50.0.dev0"
}
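generation_config.json pins the default special token ids (bos 5, eos 255001, pad 0) and a hybrid KV cache for generate(). A hedged end-to-end sketch, assuming transformers at or above the 4.50.0.dev0 recorded above and using the AutoModelForImageTextToText mapping for the AyaVisionForConditionalGeneration architecture; paths and the image URL are placeholders:

```python
# Sketch: load the checkpoint and generate; bos/eos/pad ids and the hybrid
# cache come from generation_config.json. Paths and the image are placeholders.
import torch
from transformers import AutoModelForImageTextToText, AutoProcessor

model_path = "./aya-vision-8b"
processor = AutoProcessor.from_pretrained(model_path)
model = AutoModelForImageTextToText.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map="auto"
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/photo.jpg"},
            {"type": "text", "text": "Describe the image briefly."},
        ],
    }
]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(model.device)

output = model.generate(**inputs, max_new_tokens=128)
new_tokens = output[0][inputs["input_ids"].shape[1]:]
print(processor.tokenizer.decode(new_tokens, skip_special_tokens=True))
```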
model-00001-of-00004.safetensors: binary file added (not shown)
model-00002-of-00004.safetensors: binary file added (not shown)
model-00003-of-00004.safetensors: binary file added (not shown)
model-00004-of-00004.safetensors: binary file added (not shown)
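The index file that follows maps every tensor name to one of these four shards. A small sketch of how a loader resolves a single tensor through that weight_map, assuming the safetensors package is installed and the shards sit next to the index:

```python
# Sketch: look up one tensor in model.safetensors.index.json and read it from
# the shard that holds it. Requires the `safetensors` package.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "language_model.model.embed_tokens.weight"
shard_file = index["weight_map"][name]   # "model-00001-of-00004.safetensors"

with safe_open(shard_file, framework="pt") as shard:
    tensor = shard.get_tensor(name)
print(name, tuple(tensor.shape), tensor.dtype)
```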
model.safetensors.index.json
@@ -0,0 +1,708 @@
{
  "metadata": {
    "total_size": 17263684064
  },
  "weight_map": {
"language_model.model.embed_tokens.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.28.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.4.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||||
|
"language_model.model.norm.weight": "model-00004-of-00004.safetensors",
|
||||||
|
"multi_modal_projector.layernorm.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"multi_modal_projector.layernorm.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"multi_modal_projector.linear_1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"multi_modal_projector.linear_1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"multi_modal_projector.linear_2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"multi_modal_projector.linear_2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00004.safetensors",
|
||||||
|
"vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00004.safetensors"
|
||||||
|
}
|
||||||
|
}
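For reference, the `weight_map` above is what checkpoint loaders consult when reading this sharded model: every parameter name points at the shard file that stores it (all of the vision-tower entries shown here resolve to `model-00001-of-00004.safetensors`). A minimal sketch of resolving one tensor by hand, assuming a local checkout of this repository (the directory name is illustrative):

```python
# Sketch only: look up a tensor's shard in model.safetensors.index.json
# and read it with safetensors. "./aya-vision-8b" is a hypothetical local path.
import json
from safetensors import safe_open

repo_dir = "./aya-vision-8b"

with open(f"{repo_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "vision_tower.vision_model.post_layernorm.weight"
shard = index["weight_map"][name]  # e.g. "model-00001-of-00004.safetensors"

with safe_open(f"{repo_dir}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))
```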
@@ -0,0 +1,27 @@
{
"crop_to_patches": false,
"do_convert_rgb": true,
"do_normalize": true,
"do_rescale": true,
"do_resize": true,
"image_mean": [
0.5,
0.5,
0.5
],
"image_processor_type": "GotOcr2ImageProcessor",
"image_std": [
0.5,
0.5,
0.5
],
"max_patches": 12,
"min_patches": 1,
"processor_class": "AyaVisionProcessor",
"resample": 3,
"rescale_factor": 0.00392156862745098,
"size": {
"height": 364,
"width": 364
}
}
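Taken together, the image-preprocessing values above describe a standard pipeline: convert to RGB, resize to 364×364 with bicubic resampling (`resample: 3`), rescale by 1/255, and normalize with mean and std 0.5, which maps pixel values into [-1, 1]. A rough, illustrative sketch of that transform on a single image (the real `GotOcr2ImageProcessor` also handles tiling between `min_patches` and `max_patches`; the file name is a placeholder):

```python
# Illustrative only: resize + rescale + normalize with the values above.
import numpy as np
from PIL import Image

img = Image.open("example.jpg").convert("RGB")         # do_convert_rgb
img = img.resize((364, 364), resample=Image.BICUBIC)   # size 364x364, resample=3 (bicubic)

x = np.asarray(img, dtype=np.float32)
x = x * 0.00392156862745098                            # rescale_factor = 1/255 -> [0, 1]
x = (x - 0.5) / 0.5                                    # image_mean/image_std = 0.5 -> [-1, 1]
print(x.shape, float(x.min()), float(x.max()))
```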
@@ -0,0 +1,13 @@
{
"end_of_img_token": "<|END_OF_IMG|>",
"image_token": "<image>",
"img_line_break_token": "<|IMG_LINE_BREAK|>",
"img_patch_token": "<|IMG_PATCH|>",
"img_size": 364,
"patch_size": 28,
"processor_class": "AyaVisionProcessor",
"start_of_img_token": "<|START_OF_IMG|>",
"tile_global_token": "TILE_GLOBAL",
"tile_token": "TILE",
"vision_feature_select_strategy": "full"
}
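The processor configuration above implies a 13×13 grid of 28-pixel patches per 364-pixel tile; how those positions map onto `<|IMG_PATCH|>` placeholders (and whether they are further pooled) is decided by `AyaVisionProcessor` itself, so the following arithmetic is only a back-of-the-envelope sketch:

```python
# Back-of-the-envelope arithmetic implied by img_size and patch_size above
# (illustrative; the exact placeholder layout is defined by the processor).
img_size, patch_size = 364, 28
grid = img_size // patch_size      # 13
patches_per_tile = grid * grid     # 169 patch positions per 364x364 tile
max_tiles = 12                     # "max_patches" in preprocessor_config.json
print(grid, patches_per_tile, max_tiles)
```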
@@ -0,0 +1,23 @@
{
"bos_token": {
"content": "<BOS_TOKEN>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "<|END_OF_TURN_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<PAD>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}
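As a quick sanity check, the special-token strings above are what the tokenizer should report for its BOS, EOS, and padding tokens. A short sketch, assuming a local copy of this repository (the path is illustrative):

```python
# Sketch: confirm the special tokens round-trip through the tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./aya-vision-8b")
print(tok.bos_token, tok.eos_token, tok.pad_token)
# Expected: <BOS_TOKEN> <|END_OF_TURN_TOKEN|> <PAD>
```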
File diff suppressed because it is too large
@@ -0,0 +1,392 @@
{
"add_bos_token": true,
"add_eos_token": false,
"add_prefix_space": false,
"added_tokens_decoder": {
"0": {
"content": "<PAD>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
"content": "<UNK>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "<CLS>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"3": {
"content": "<SEP>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"4": {
"content": "<MASK_TOKEN>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"5": {
"content": "<BOS_TOKEN>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"6": {
"content": "<EOS_TOKEN>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"7": {
"content": "<EOP_TOKEN>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"255000": {
"content": "<|START_OF_TURN_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255001": {
"content": "<|END_OF_TURN_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"255002": {
"content": "<|YES_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255003": {
"content": "<|NO_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255004": {
"content": "<|GOOD_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255005": {
"content": "<|BAD_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255006": {
"content": "<|USER_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255007": {
"content": "<|CHATBOT_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255008": {
"content": "<|SYSTEM_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255009": {
"content": "<|USER_0_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255010": {
"content": "<|USER_1_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255011": {
"content": "<|USER_2_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255012": {
"content": "<|USER_3_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255013": {
"content": "<|USER_4_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255014": {
"content": "<|USER_5_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255015": {
"content": "<|USER_6_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255016": {
"content": "<|USER_7_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255017": {
"content": "<|USER_8_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255018": {
"content": "<|USER_9_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255019": {
"content": "<|START_THINKING|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255020": {
"content": "<|END_THINKING|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255021": {
"content": "<|START_RESPONSE|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"255022": {
"content": "<|END_RESPONSE|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"255023": {
"content": "<|START_ACTION|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255024": {
"content": "<|END_ACTION|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255025": {
"content": "<|START_TOOL_RESULT|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255026": {
"content": "<|END_TOOL_RESULT|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255027": {
"content": "<|EXTRA_8_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255028": {
"content": "<|NEW_FILE|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"255029": {
"content": "<|BEGINNING_OF_PREFIX_FIM_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255030": {
"content": "<|BEGINNING_OF_MIDDLE_FIM_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255031": {
"content": "<|BEGINNING_OF_SUFFIX_FIM_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255032": {
"content": "<|END_OF_MIDDLE_FIM_TOKEN|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255033": {
"content": "<|START_OF_IMG|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255034": {
"content": "<|END_OF_IMG|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255035": {
"content": "<|IMG_LINE_BREAK|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"255036": {
"content": "<|IMG_PATCH|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
}
},
"bos_token": "<BOS_TOKEN>",
"chat_template": [
{
"name": "default",
"template": "{{ bos_token }}<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble\nYou are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes. When analyzing images, carefully describe and interpret their content while avoiding any promotion of harm, misinformation, or bias.\n\nYou are Aya Vision, a vision-language model built by Cohere for AI. You have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew. You are capable of interpreting images, including describing them, answering questions about their contents, extracting textual information, and analyzing visual context. Your responses must maintain the highest standards of quality, accuracy, and safety.\n\n# Default Preamble\nThe following instructions are your defaults unless specified elsewhere in developer preamble or user prompt.\n- Your name is Aya Vision.\n- You are a large language model built by Cohere for AI.\n- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions.\n- If the input is ambiguous, ask clarifying follow-up questions.\n- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks).\n- Use LaTeX to generate mathematical notation for complex equations.\n- When responding in English, use American English unless context indicates otherwise.\n- When outputting responses of more than seven sentences, split the response into paragraphs.\n- Prefer the active voice.\n- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. 
Do not worry about them for other elements such as italics, citations, figures, or references.\n- Use gender-neutral pronouns for unspecified persons.\n- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list.\n- Use the third person when asked to write a summary.\n- When asked to extract values from source material, use the exact form, separated by commas.\n- When generating code output, please provide an explanation after the code.\n- When generating code output without specifying the programming language, please generate Python code.\n- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer.\n<|END_OF_TURN_TOKEN|>\n{%- for message in messages -%}\n <|START_OF_TURN_TOKEN|>{{ message.role | replace(\"user\", \"<|USER_TOKEN|>\") | replace(\"assistant\", \"<|CHATBOT_TOKEN|><|START_RESPONSE|>\") | replace(\"system\", \"<|SYSTEM_TOKEN|>\") }}\n {%- if message.content is defined -%}\n {%- if message.content is string -%}\n{{ message.content }}\n {%- else -%}\n {%- for item in message.content | selectattr('type', 'equalto', 'image') -%}\n<image>\n {%- endfor -%}\n {%- for item in message.content | selectattr('type', 'equalto', 'text') -%}\n{{ item.text }}\n {%- endfor -%}\n {%- endif -%}\n {%- elif message.message is defined -%}\n {%- if message.message is string -%}\n{{ message.message }}\n {%- else -%}\n {%- for item in message.message | selectattr('type', 'equalto', 'image') -%}\n<image>\n {%- endfor -%}\n {%- for item in message.message | selectattr('type', 'equalto', 'text') -%}\n{{ item.text }}\n {%- endfor -%}\n {%- endif -%}\n {%- endif -%}\n {%- if message.role == \"assistant\" -%}\n<|END_RESPONSE|>\n {%- endif -%}\n<|END_OF_TURN_TOKEN|>\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\n{%- endif -%}\n"
}
],
"clean_up_tokenization_spaces": false,
"eos_token": "<|END_OF_TURN_TOKEN|>",
"extra_special_tokens": {},
"legacy": true,
"max_length": null,
"merges_file": null,
"model_max_length": 1000000000000000019884624838656,
"pad_to_multiple_of": null,
"pad_token": "<PAD>",
"pad_token_type_id": 0,
"padding_side": "left",
"processor_class": "AyaVisionProcessor",
"sp_model_kwargs": {},
"spaces_between_special_tokens": false,
"tokenizer_class": "CohereTokenizer",
"unk_token": null,
"use_default_system_prompt": false,
"vocab_file": null
}
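The tokenizer configuration above embeds a default chat template, so the usual processor entry point can render an image-plus-text turn directly. A sketch of that call, assuming a `transformers` build with Aya Vision support (the model id and image URL are placeholders):

```python
# Sketch: render and tokenize one user turn with the default chat template.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("CohereForAI/aya-vision-8b")  # assumed id

messages = [
    {"role": "user", "content": [
        {"type": "image", "url": "https://example.com/cat.png"},  # placeholder URL
        {"type": "text", "text": "What is in this image?"},
    ]},
]

inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True,
    return_dict=True, return_tensors="pt",
)
print(inputs["input_ids"].shape)
```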