forked from ailab/Qwen2-Audio-7B-Instruct
first commit
commit 67d89e59c7
.gitattributes
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
@@ -0,0 +1,200 @@
---
license: apache-2.0
language:
- en
tags:
- chat
- audio
---

# Qwen2-Audio-7B-Instruct

## Introduction

Qwen2-Audio is the new series of Qwen large audio-language models. Qwen2-Audio accepts a wide range of audio inputs and can either perform audio analysis or respond directly in text to spoken instructions. We introduce two distinct audio interaction modes:

* voice chat: users can freely engage in voice interactions with Qwen2-Audio without any text input;

* audio analysis: users can provide audio together with text instructions for analysis during the interaction.

We release Qwen2-Audio-7B and Qwen2-Audio-7B-Instruct, which are the pretrained base model and the chat model, respectively.

For more details, please refer to our [Blog](https://qwenlm.github.io/blog/qwen2-audio/), [GitHub](https://github.com/QwenLM/Qwen2-Audio), and [Report](https://www.arxiv.org/abs/2407.10759).
<br>

## Requirements
The code for Qwen2-Audio is included in the latest Hugging Face transformers, and we advise you to build from source with `pip install git+https://github.com/huggingface/transformers`; otherwise you might encounter the following error:
```
KeyError: 'qwen2-audio'
```
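
As a quick sanity check (our own sketch, not part of the official card), you can confirm that your installed build already registers the model class before downloading any weights:

```python
# On a recent source build of transformers this import succeeds;
# on older releases the class does not exist yet and raises ImportError.
import transformers
from transformers import Qwen2AudioForConditionalGeneration  # noqa: F401

print(transformers.__version__)
```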

## Quickstart

In the following, we demonstrate how to use `Qwen2-Audio-7B-Instruct` for inference, supporting both voice chat and audio analysis modes. Note that we use the ChatML format for dialogue; in this demo we show how to leverage `apply_chat_template` for this purpose.

### Voice Chat Inference
In the voice chat mode, users can freely engage in voice interactions with Qwen2-Audio without text input:
```python
from io import BytesIO
from urllib.request import urlopen

import librosa
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct", device_map="auto")

conversation = [
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/guess_age_gender.wav"},
    ]},
    {"role": "assistant", "content": "Yes, the speaker is female and in her twenties."},
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/translate_to_chinese.wav"},
    ]},
]
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
# Load every audio referenced in the conversation at the sampling rate the
# feature extractor expects.
audios = []
for message in conversation:
    if isinstance(message["content"], list):
        for ele in message["content"]:
            if ele["type"] == "audio":
                audios.append(librosa.load(
                    BytesIO(urlopen(ele["audio_url"]).read()),
                    sr=processor.feature_extractor.sampling_rate)[0]
                )

inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
inputs.input_ids = inputs.input_ids.to("cuda")

generate_ids = model.generate(**inputs, max_length=256)
# Keep only the newly generated tokens, dropping the prompt.
generate_ids = generate_ids[:, inputs.input_ids.size(1):]

response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
```
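
The audio-gathering loop above reappears verbatim in every snippet on this card, so it can be handy to factor it into a helper. A minimal sketch (the `collect_audios` name is ours, not part of the official example):

```python
from io import BytesIO
from urllib.request import urlopen

import librosa


def collect_audios(conversation, processor):
    """Load every audio referenced in a conversation, resampled for the model."""
    audios = []
    for message in conversation:
        if isinstance(message["content"], list):
            for ele in message["content"]:
                if ele["type"] == "audio":
                    waveform, _ = librosa.load(
                        BytesIO(urlopen(ele["audio_url"]).read()),
                        sr=processor.feature_extractor.sampling_rate,
                    )
                    audios.append(waveform)
    return audios
```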

### Audio Analysis Inference
In the audio analysis mode, users can provide both audio and text instructions for analysis:
```python
from io import BytesIO
from urllib.request import urlopen

import librosa
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct", device_map="auto")

conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"},
        {"type": "text", "text": "What's that sound?"},
    ]},
    {"role": "assistant", "content": "It is the sound of glass shattering."},
    {"role": "user", "content": [
        {"type": "text", "text": "What can you do when you hear that?"},
    ]},
    {"role": "assistant", "content": "Stay alert and cautious, and check if anyone is hurt or if there is any damage to property."},
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/1272-128104-0000.flac"},
        {"type": "text", "text": "What does the person say?"},
    ]},
]
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
# Gather the referenced audios, resampled to the feature extractor's rate.
audios = []
for message in conversation:
    if isinstance(message["content"], list):
        for ele in message["content"]:
            if ele["type"] == "audio":
                audios.append(
                    librosa.load(
                        BytesIO(urlopen(ele["audio_url"]).read()),
                        sr=processor.feature_extractor.sampling_rate)[0]
                )

inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
inputs.input_ids = inputs.input_ids.to("cuda")

generate_ids = model.generate(**inputs, max_length=256)
generate_ids = generate_ids[:, inputs.input_ids.size(1):]

response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
```
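
To continue the same dialogue, append the generated answer and a new user turn to `conversation`, then rebuild the prompt and generate again. A sketch assuming `response` holds the answer from above:

```python
# Continue the dialogue with a follow-up question (sketch).
conversation.append({"role": "assistant", "content": response})
conversation.append({"role": "user", "content": [
    {"type": "text", "text": "Please transcribe it word for word."},
]})
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
# ...then re-collect the audios and call processor/model.generate exactly as above.
```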

### Batch Inference
We also support batch inference:
```python
from io import BytesIO
from urllib.request import urlopen

import librosa
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct", device_map="auto")

conversation1 = [
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"},
        {"type": "text", "text": "What's that sound?"},
    ]},
    {"role": "assistant", "content": "It is the sound of glass shattering."},
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav"},
        {"type": "text", "text": "What can you hear?"},
    ]}
]

conversation2 = [
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/1272-128104-0000.flac"},
        {"type": "text", "text": "What does the person say?"},
    ]},
]

conversations = [conversation1, conversation2]

text = [processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False) for conversation in conversations]

# Collect the audios from every conversation, in prompt order.
audios = []
for conversation in conversations:
    for message in conversation:
        if isinstance(message["content"], list):
            for ele in message["content"]:
                if ele["type"] == "audio":
                    audios.append(
                        librosa.load(
                            BytesIO(urlopen(ele["audio_url"]).read()),
                            sr=processor.feature_extractor.sampling_rate)[0]
                    )

inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
inputs.input_ids = inputs.input_ids.to("cuda")

generate_ids = model.generate(**inputs, max_length=256)
generate_ids = generate_ids[:, inputs.input_ids.size(1):]

response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
```
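
In the batch case, `batch_decode` is not indexed with `[0]`, so `response` is a list with one decoded answer per conversation:

```python
# One decoded answer per input conversation, in order.
for i, answer in enumerate(response):
    print(f"conversation {i + 1}: {answer}")
```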

## Citation

If you find our work helpful, feel free to cite us.

```BibTeX
@article{Qwen2-Audio,
  title={Qwen2-Audio Technical Report},
  author={Chu, Yunfei and Xu, Jin and Yang, Qian and Wei, Haojie and Wei, Xipin and Guo, Zhifang and Leng, Yichong and Lv, Yuanjun and He, Jinzheng and Lin, Junyang and Zhou, Chang and Zhou, Jingren},
  journal={arXiv preprint arXiv:2407.10759},
  year={2024}
}
```

```BibTeX
@article{Qwen-Audio,
  title={Qwen-Audio: Advancing Universal Audio Understanding via Unified Large-Scale Audio-Language Models},
  author={Chu, Yunfei and Xu, Jin and Zhou, Xiaohuan and Yang, Qian and Zhang, Shiliang and Yan, Zhijie and Zhou, Chang and Zhou, Jingren},
  journal={arXiv preprint arXiv:2311.07919},
  year={2023}
}
```
config.json
@@ -0,0 +1,34 @@
{
  "architectures": [
    "Qwen2AudioForConditionalGeneration"
  ],
  "audio_config": {
    "model_type": "qwen2_audio_encoder",
    "num_mel_bins": 128,
    "encoder_layers": 32,
    "encoder_attention_heads": 20,
    "encoder_ffn_dim": 5120,
    "d_model": 1280,
    "activation_function": "gelu",
    "scale_embedding": false,
    "max_source_positions": 1500
  },
  "audio_token_index": 151646,
  "ignore_index": -100,
  "model_type": "qwen2_audio",
  "text_config": {
    "bos_token_id": 151643,
    "eos_token_id": 151645,
    "intermediate_size": 11008,
    "max_position_embeddings": 8192,
    "model_type": "qwen2",
    "rope_theta": 10000,
    "rms_norm_eps": 1e-5,
    "sliding_window": 32768,
    "torch_dtype": "bfloat16",
    "use_mrope": false,
    "vocab_size": 156032
  },
  "transformers_version": "4.38.1",
  "vocab_size": 156032
}
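
For reference, these nested settings are exposed on the loaded config object. A minimal sketch of inspecting them:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
print(config.audio_config.num_mel_bins)            # 128 mel bins in the audio encoder
print(config.text_config.max_position_embeddings)  # 8192 for the text model
```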
generation_config.json
@@ -0,0 +1,11 @@
{
  "chat_format": "chatml",
  "eos_token_id": [151643, 151645],
  "pad_token_id": 151643,
  "do_sample": true,
  "top_k": 20,
  "top_p": 0.5,
  "temperature": 0.7,
  "repetition_penalty": 1.1,
  "transformers_version": "4.38.1"
}
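
`model.generate` picks these defaults up automatically from `generation_config.json`; to override them for a single call, pass the parameters explicitly. A sketch assuming `inputs` from the quickstart:

```python
# Explicitly overriding the sampling defaults for one call.
generate_ids = model.generate(
    **inputs,
    max_length=256,
    do_sample=True,
    top_k=20,
    top_p=0.5,
    temperature=0.7,
    repetition_penalty=1.1,
)
```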
File diff suppressed because it is too large
Binary files (5) not shown.
model.safetensors.index.json
@@ -0,0 +1,883 @@
{
  "metadata": {
    "total_size": 16794189824
  },
  "weight_map": {
    "audio_tower.conv1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.conv1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.conv2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.conv2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.embed_positions.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.0.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.1.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.10.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.11.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.12.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.13.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.14.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.15.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.16.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.17.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.18.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.19.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.2.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.20.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.21.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.22.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.23.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.24.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.25.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.26.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.27.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.28.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.29.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.3.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.30.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.31.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.4.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.5.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.6.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.7.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.8.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.fc1.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.fc1.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.fc2.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.fc2.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.final_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.final_layer_norm.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
    "audio_tower.layers.9.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
    "language_model.lm_head.weight": "model-00005-of-00005.safetensors",
    "language_model.model.embed_tokens.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.13.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.13.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.13.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.13.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.14.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.15.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.16.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.17.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.20.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
||||
"language_model.model.layers.23.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.23.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.24.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.25.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.26.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.27.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.28.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.29.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.3.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.3.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
|
||||
"language_model.model.layers.30.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.30.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.31.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
||||
"language_model.model.layers.4.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.4.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
||||
"language_model.model.norm.weight": "model-00004-of-00005.safetensors",
|
||||
"multi_modal_projector.linear.bias": "model-00001-of-00005.safetensors",
|
||||
"multi_modal_projector.linear.weight": "model-00001-of-00005.safetensors"
|
||||
}
|
||||
}
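The `weight_map` above follows the standard Hugging Face sharded-checkpoint index format: each tensor name maps to the `.safetensors` shard that stores it, and `from_pretrained` consults this map to resolve every tensor across the five shards. A minimal sketch of inspecting the index by hand (the local file path is illustrative; point it at wherever the checkpoint is downloaded):

```python
import json
from collections import Counter

# Illustrative path: adjust to the local checkpoint directory.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]

# Which shard holds a given tensor?
print(weight_map["language_model.lm_head.weight"])
# -> model-00005-of-00005.safetensors

# How many tensors live in each shard?
print(Counter(weight_map.values()))
```

Note that a single layer can straddle a shard boundary: for layer 13, the `input_layernorm` and the `k_proj`/`q_proj` weights sit in shard 2 while the rest of the layer sits in shard 3, which is why loaders resolve the shard per tensor rather than per layer.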
@ -0,0 +1,14 @@
{
"chunk_length": 30,
"feature_extractor_type": "WhisperFeatureExtractor",
"feature_size": 128,
"hop_length": 160,
"n_fft": 400,
"n_samples": 480000,
"nb_max_frames": 3000,
"padding_side": "right",
"padding_value": 0.0,
"processor_class": "Qwen2AudioProcessor",
"return_attention_mask": true,
"sampling_rate": 16000
}
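These fields describe Whisper-style log-mel feature extraction, and they are internally consistent: 30-second chunks at 16 kHz give 480,000 samples, and a 160-sample hop turns those into 3,000 frames of 128 mel bins each. A small sketch of the arithmetic, with the constants copied from the config above:

```python
# Constants copied from preprocessor_config.json above.
chunk_length = 30      # seconds of audio per chunk
sampling_rate = 16000  # Hz
hop_length = 160       # samples between successive STFT frames
n_fft = 400            # STFT window length (25 ms at 16 kHz)
feature_size = 128     # mel bins per frame

n_samples = chunk_length * sampling_rate  # 30 * 16000 = 480000
nb_max_frames = n_samples // hop_length   # 480000 // 160 = 3000

assert n_samples == 480000
assert nb_max_frames == 3000
# Each 30 s chunk thus becomes a 128 x 3000 log-mel spectrogram
# before it is passed to the audio tower.
```

In practice this file is read automatically when the processor is loaded, so these values rarely need to be edited by hand.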
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long