From 51a1a0468ae28f7ecfc9855f597dcde217db59ec Mon Sep 17 00:00:00 2001
From: yang yingjie
Date: Tue, 10 Jun 2025 15:17:04 +0800
Subject: [PATCH] first commit
---
added_tokens.json | 10 +
config.json | 37 +
configuration_minicpm.py | 203 +
generation_config.json | 12 +
model.safetensors | 3 +
modeling_minicpm.py | 2509 +
special_tokens_map.json | 33 +
tokenizer.json | 490843 ++++++++++++++++++++++++++++++++++++
tokenizer.model | 3 +
tokenizer_config.json | 117 +
10 files changed, 493770 insertions(+)
create mode 100644 added_tokens.json
create mode 100644 config.json
create mode 100644 configuration_minicpm.py
create mode 100644 generation_config.json
create mode 100644 model.safetensors
create mode 100644 modeling_minicpm.py
create mode 100644 special_tokens_map.json
create mode 100644 tokenizer.json
create mode 100644 tokenizer.model
create mode 100644 tokenizer_config.json
diff --git a/added_tokens.json b/added_tokens.json
new file mode 100644
index 0000000..7f0cedc
--- /dev/null
+++ b/added_tokens.json
@@ -0,0 +1,10 @@
+{
+ "<|execute_end|>": 73444,
+ "<|execute_start|>": 73443,
+ "<|fim_middle|>": 73446,
+ "<|fim_prefix|>": 73445,
+ "<|fim_suffix|>": 73447,
+ "<|im_end|>": 73440,
+ "<|im_start|>": 73441,
+ "<|tool_call|>": 73442
+}
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..ef0ad71
--- /dev/null
+++ b/config.json
@@ -0,0 +1,37 @@
+{
+ "_name_or_path": "openbmb/MiniCPM4-0.5B",
+ "architectures": [
+ "MiniCPMForCausalLM"
+ ],
+ "auto_map": {
+ "AutoConfig": "configuration_minicpm.MiniCPMConfig",
+ "AutoModel": "modeling_minicpm.MiniCPMModel",
+ "AutoModelForCausalLM": "modeling_minicpm.MiniCPMForCausalLM",
+ "AutoModelForSeq2SeqLM": "modeling_minicpm.MiniCPMForCausalLM",
+ "AutoModelForSequenceClassification": "modeling_minicpm.MiniCPMForSequenceClassification"
+ },
+ "bos_token_id": 1,
+ "eos_token_id": [2, 73440],
+ "hidden_act": "silu",
+ "hidden_size": 1024,
+ "initializer_range": 0.1,
+ "intermediate_size": 4096,
+ "max_position_embeddings": 32768,
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "rope_type": "longrope",
+ "long_factor": [1.0004360675811768, 1.0668443441390991, 1.1631425619125366, 1.3025742769241333, 1.5040205717086792, 1.7941505908966064, 2.2101221084594727, 2.802666664123535, 3.6389970779418945, 4.804192543029785, 6.39855432510376, 8.527148246765137, 11.277542114257812, 14.684998512268066, 18.69317054748535, 23.13019371032715, 27.72362518310547, 32.1606559753418, 36.168827056884766, 39.57627868652344, 42.32667541503906, 44.45526885986328, 46.04962921142578, 47.21482849121094, 48.05115509033203, 48.64370346069336, 49.05967712402344, 49.34980392456055, 49.551246643066406, 49.69068145751953, 49.78697967529297, 49.85338592529297],
+ "short_factor": [1.0004360675811768, 1.0668443441390991, 1.1631425619125366, 1.3025742769241333, 1.5040205717086792, 1.7941505908966064, 2.2101221084594727, 2.802666664123535, 3.6389970779418945, 4.804192543029785, 6.39855432510376, 8.527148246765137, 11.277542114257812, 14.684998512268066, 18.69317054748535, 23.13019371032715, 27.72362518310547, 32.1606559753418, 36.168827056884766, 39.57627868652344, 42.32667541503906, 44.45526885986328, 46.04962921142578, 47.21482849121094, 48.05115509033203, 48.64370346069336, 49.05967712402344, 49.34980392456055, 49.551246643066406, 49.69068145751953, 49.78697967529297, 49.85338592529297],
+ "original_max_position_embeddings": 32768
+ },
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.46.3",
+ "use_cache": true,
+ "vocab_size": 73448,
+ "scale_emb": 12,
+ "dim_model_base": 256,
+ "scale_depth": 1.4
+}
\ No newline at end of file
diff --git a/configuration_minicpm.py b/configuration_minicpm.py
new file mode 100644
index 0000000..8fd0bc0
--- /dev/null
+++ b/configuration_minicpm.py
@@ -0,0 +1,203 @@
+# coding=utf-8
+# Copyright 2025 The OpenBMB Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MiniCPM model configuration"""
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+MINICPM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class MiniCPMConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MiniCPMModel`]. It is used to instantiate a MiniCPM
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a configuration similar to that of MiniCPM-7B.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the MiniCPM model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`MiniCPMModel`].
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 11008):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+ `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by mean-pooling all the original heads within that group. For more details, check out [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+ `num_attention_heads`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ pad_token_id (`int`, *optional*):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ pretraining_tp (`int`, *optional*, defaults to 1):
+ Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+ document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+ necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+ issue](https://github.com/pytorch/pytorch/issues/76232).
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to tie the input word embeddings with the output embeddings.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+ attention_bias (`bool`, *optional*, defaults to `False`):
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+
+ ```python
+ >>> from transformers import MiniCPMModel, MiniCPMConfig
+
+ >>> # Initializing a MiniCPM minicpm-7b style configuration
+ >>> configuration = MiniCPMConfig()
+
+ >>> # Initializing a model from the minicpm-7b style configuration
+ >>> model = MiniCPMModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = 'minicpm'
+ keys_to_ignore_at_inference = ['past_key_values']
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ hidden_act='silu',
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=None,
+ bos_token_id=1,
+ eos_token_id=2,
+ pretraining_tp=1,
+ tie_word_embeddings=True,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ attention_bias=False,
+ attention_dropout=0.0,
+ scale_emb=1,
+ dim_model_base=1,
+ scale_depth=1,
+ mup_denominator=None,
+ sparse_config=None,
+ **kwargs):
+
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.pretraining_tp = pretraining_tp
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ # self._rope_scaling_validation()
+ self.attention_bias = attention_bias
+ self.attention_dropout = attention_dropout
+ self.scale_emb = scale_emb
+ self.dim_model_base = dim_model_base
+ self.scale_depth = scale_depth
+ # only used for Eagle Head
+ self.mup_denominator = mup_denominator
+
+ # sparse config
+ self.sparse_config = sparse_config
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+ try:
+ import flash_attn
+ self._attn_implementation = 'flash_attention_2'
+ except ImportError:
+ pass
+
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
+ f'got {self.rope_scaling}'
+ )
+ rope_scaling_type = self.rope_scaling.get('type', None)
+ rope_scaling_factor = self.rope_scaling.get('factor', None)
+ if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..cf2547d
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,12 @@
+{
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": [
+ 2,
+ 73440
+ ],
+ "pad_token_id": 2,
+ "temperature": 0.8,
+ "top_p": 0.8,
+ "transformers_version": "4.46.1"
+}
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000..29cfae6
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef74f4de1d1c580dd64e425d11f29881b9fd39f9be16a485150f727d4583181c
+size 867772648
diff --git a/modeling_minicpm.py b/modeling_minicpm.py
new file mode 100644
index 0000000..23a3705
--- /dev/null
+++ b/modeling_minicpm.py
@@ -0,0 +1,2509 @@
+# coding=utf-8
+# Copyright 2025 The OpenBMB Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch MiniCPM model."""
+import math
+import re
+import warnings
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.modeling_attn_mask_utils import (
+ AttentionMaskConverter,
+ _prepare_4d_attention_mask,
+ _prepare_4d_causal_attention_mask,
+ _prepare_4d_causal_attention_mask_for_sdpa,
+)
+from transformers.modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ SequenceClassifierOutputWithPast,
+)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
+from transformers.utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from transformers.utils.import_utils import is_torch_fx_available
+
+from .configuration_minicpm import MiniCPMConfig
+
+try:
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+ from infllm_v2 import (
+ infllmv2_attn_stage1,
+ infllmv2_attn_varlen_func,
+ infllmv2_attn_with_kvcache,
+ max_pooling_1d,
+ )
+except ImportError:
+ pass
+
+from functools import lru_cache
+
+
+def compressed_attention(
+ q: torch.Tensor,
+ k: torch.Tensor,
+ v: torch.Tensor,
+ kernel_size: int,
+ kernel_stride: int,
+ block_size: int,
+ topk: int,
+ cu_seqlens_q: torch.Tensor,
+ cu_seqlens_k: torch.Tensor,
+ max_seqlen_q: int,
+ max_seqlen_k: int,
+ sm_scale: float = None,
+ init_blocks: int = 1,
+ local_blocks: int = 2,
+ parallel_topk_compute: Union[str, bool] = 'auto',
+ total_seq_lens=-1,
+) -> torch.Tensor:
+ """Attention between the query and the compressed key and value. Computes the topk block indices used in topk sparse attention.
+
+ Args:
+ q (torch.Tensor): shape [total_q_len, num_q_heads, head_dim]
+ k (torch.Tensor): shape [total_kv_len, num_kv_heads, head_dim]
+ v (torch.Tensor): shape [total_kv_len, num_kv_heads, head_dim]
+ kernel_size (int): kernel size in compress_key_value
+ kernel_stride (int): stride of compress_key_value
+ block_size (int): key value block size for topk sparse attention.
+ topk (int): number of blocks for each query.
+ cu_seqlens_q (torch.Tensor): shape [batch_size + 1], similar to cu_seqlens_q in flash_attn_varlen_func.
+ cu_seqlens_k (torch.Tensor): shape [batch_size + 1], similar to cu_seqlens_k in flash_attn_varlen_func.
+ max_seqlen_q (int): max q len of the batch.
+ max_seqlen_k (int): max k len of the batch.
+ sm_scale (float, optional): softmax scale. Defaults to None, means 1/sqrt(head_dim).
+ init_blocks (int, optional): Number of init blocks for each query. Defaults to 1.
+ local_blocks (int, optional): Number of local blocks for each query. Defaults to 2.
+ parallel_topk_compute (str, optional): Set it to False only when the sequence length is very long; this works around a known bug
+ that will be fixed later. Defaults to 'auto', which resolves to False when the sequence length is greater than 32k and True otherwise.
+ total_seq_lens (int, optional): Total sequence length including cached tokens. Defaults to -1, meaning no KV cache is used; when set,
+ the batch size must be 1 and the cache length is taken as `total_seq_lens - max_seqlen_q`.
+
+ Returns:
+ torch.Tensor: topk_idx, the top-k block indices used in topk_sparse_attention
+ """
+ with torch.no_grad():
+ cache_len = 0
+ batch_size = cu_seqlens_q.shape[0] - 1
+ if total_seq_lens == -1:
+ total_seq_lens = max_seqlen_q
+ q_idx = torch.cat(
+ [
+ torch.arange(cu_seqlens_q[i + 1] - cu_seqlens_q[i], device=q.device) + total_seq_lens - (cu_seqlens_q[i + 1] - cu_seqlens_q[i])
+ for i in range(batch_size)
+ ],
+ dim=0,
+ )
+ q_idx = q_idx // block_size
+
+ else:
+ cache_len = total_seq_lens - max_seqlen_q
+ assert batch_size == 1, 'batch_size must be 1 when total_seq_lens is set'
+ q_idx = torch.tensor([total_seq_lens - 1], device=q.device, dtype=torch.int32) // block_size
+
+ score = infllmv2_attn_stage1(
+ q.contiguous(),
+ k.contiguous(),
+ v.contiguous(),
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_q,
+ max_seqlen_k=max_seqlen_k,
+ causal=q_idx.shape[0] > 1)
+ score = score[:, :q_idx.shape[0], :]
+
+ # Replace transform_score with max_pooling_1d
+ block_score = max_pooling_1d(
+ score.contiguous(),
+ cache_len=cache_len,
+ local_blocks=local_blocks,
+ init_blocks=init_blocks,
+ block_size=block_size,
+ stride=kernel_stride,
+ )
+ # get topk
+ topk = min(topk, block_score.shape[-1])
+ topk_idx = block_score.topk(topk, dim=-1).indices.sort(-1).values
+ topk_idx[topk_idx >= q_idx[None, :, None]] = -1
+ topk_idx = topk_idx.to(torch.int32)
+
+ return topk_idx
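+ # Rough illustration of the top-k selection above (numbers are only an example): with
+ # block_size=64, a query at absolute position 200 falls in block 200 // 64 = 3; any selected
+ # block index >= 3 is overwritten with -1, so only earlier blocks (0, 1, 2) remain eligible
+ # for the subsequent top-k sparse attention.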
+
+
+@lru_cache(maxsize=16)
+def calc_chunks_with_stride(cu_seqlen, chunk_size, kernel_stride):
+ """
+ Compute the chunks that require Sparse attention, with stride support.
+
+ Args:
+ cu_seqlen (torch.Tensor): Cumulative sequence lengths for each sample.
+ chunk_size (int): Chunk size used for Sparse attention.
+ kernel_stride (int): Stride size when sliding over the sequence.
+
+ Returns:
+ filtered_indices (torch.Tensor): Indices used to directly index into the key/value tensors.
+ cu_seqlens_compressed (torch.Tensor): Cumulative sequence lengths after compression.
+ """
+ # 1. Compute the length of each sequence
+ batch_sizes = cu_seqlen[1:] - cu_seqlen[:-1]
+
+ # 2. Compute the start positions of chunks for each sequence (with stride)
+ max_seq_len = torch.max(batch_sizes)
+ max_num_chunks_per_seq = (max_seq_len - chunk_size) // kernel_stride + 1
+ chunk_start_offsets = torch.arange(0, max_num_chunks_per_seq * kernel_stride, kernel_stride, device=cu_seqlen.device)
+ seq_starts = cu_seqlen[:-1]
+ chunk_start_in_seq = seq_starts[:, None] + chunk_start_offsets[None, :] # [batch_size, max_num_chunks_per_seq]
+
+ # 3. Filter out chunks that exceed sequence length or are smaller than the full chunk size
+ chunk_end_in_seq = chunk_start_in_seq + chunk_size
+ valid_chunk_mask = (chunk_end_in_seq <= (seq_starts[:, None] + batch_sizes[:, None]))
+
+ # 4. Filter valid chunk start positions using the valid_chunk_mask
+ valid_chunk_starts = chunk_start_in_seq[valid_chunk_mask] # [num_valid_chunks]
+ del chunk_start_in_seq
+ # 5. Generate filtered_indices
+ chunk_indices = torch.arange(
+ 0, chunk_size, device=cu_seqlen.device
+ )[None, :] # [1, chunk_size]
+ filtered_indices = valid_chunk_starts[:, None] + chunk_indices # [num_valid_chunks, chunk_size]
+ filtered_indices = filtered_indices.view(-1) # Flatten to 1D indices
+
+ # 6. Compute compressed cumulative sequence lengths
+ num_filtered_chunks_per_batch = valid_chunk_mask.sum(dim=1) # Number of valid chunks per batch
+ cu_seqlens_compressed = torch.zeros(
+ len(cu_seqlen), dtype=torch.int32, device=cu_seqlen.device
+ )
+ cu_seqlens_compressed[1:] = num_filtered_chunks_per_batch.cumsum(dim=0)
+ del num_filtered_chunks_per_batch, chunk_start_offsets, seq_starts, chunk_end_in_seq, valid_chunk_mask, chunk_indices
+ return filtered_indices, cu_seqlens_compressed
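+ # Worked example of the chunking above (illustrative values only):
+ #   cu_seqlen = [0, 64], chunk_size = 32, kernel_stride = 16
+ #   -> chunk starts [0, 16, 32] and chunk ends [32, 48, 64], all of which fit in the sequence
+ #   -> filtered_indices covers [0..31], [16..47], [32..63] (3 * 32 = 96 indices)
+ #   -> cu_seqlens_compressed = [0, 3]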
+
+
+class CompressK(torch.nn.Module):
+ def __init__(self, head_num_k, head_dim, kernel_size, kernel_stride=16):
+ """
+ Module for compressing key (K) representations.
+
+ Args:
+ head_num_k (int): Number of key attention heads.
+ head_dim (int): Dimension of each attention head.
+ kernel_size (int): Size of each chunk used for compression.
+ kernel_stride (int, optional): Stride used when dividing input into chunks. Default is 16.
+ """
+ super().__init__()
+ self.kernel_size = kernel_size
+ self.head_num_k = head_num_k
+ self.head_dim = head_dim
+ self.kernel_stride = kernel_stride
+
+ def forward(self, k: torch.Tensor, cu_seqlens):
+ """
+ Forward pass for compressing the key (K) tensor.
+
+ Args:
+ k (torch.Tensor): Input key tensor of shape (total_seq_len, num_heads, head_dim).
+ cu_seqlens (torch.Tensor): Cumulative sequence lengths for each sample in the batch, typically used for handling variable-length sequences.
+
+ Returns:
+ compress_k (torch.Tensor): Compressed key tensor.
+ cu_seqlens_compressed (torch.Tensor): Updated cumulative sequence lengths after compression.
+
+ """
+ # Compute chunk-related metadata, with stride support
+ filtered_k_indices, cu_seqlens_compressed = calc_chunks_with_stride(
+ cu_seqlens, self.kernel_size, self.kernel_stride
+ )
+
+ # Extract filtered key vectors
+ filtered_k = k.index_select(0, filtered_k_indices.view(-1))
+
+ # split
+ filtered_k = filtered_k.view(filtered_k.shape[0] // self.kernel_size, self.kernel_size, self.head_num_k, self.head_dim) # [l, block_size,h,d]
+
+ compressed_k = filtered_k.mean(dim=1)
+ return compressed_k, cu_seqlens_compressed
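+ # Shape sketch for the compression above, following the worked example in calc_chunks_with_stride
+ # (illustrative values only): k of shape (64, 2, 64) with kernel_size=32 and kernel_stride=16 yields
+ # 3 overlapping chunks, reshaped to (3, 32, 2, 64) and mean-pooled over the chunk dimension into
+ # compressed_k of shape (3, 2, 64), with cu_seqlens_compressed = [0, 3].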
+
+
+class DynamicCacheQKV(DynamicCache):
+ """
+ A cache that grows dynamically as more tokens are generated, extending `DynamicCache`.
+
+ It stores the key and value states as a list of tensors, one for each layer, with the expected shape
+ `[batch_size, num_heads, seq_len, head_dim]`. It additionally keeps per-layer caches for compressed keys,
+ not-yet-compressed keys and keys without RoPE applied, which are used by the sparse attention path.
+
+ Example:
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ >>> model = AutoModelForCausalLM.from_pretrained("openbmb/MiniCPM4-0.5B", trust_remote_code=True)
+ >>> tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM4-0.5B", trust_remote_code=True)
+
+ >>> inputs = tokenizer(text="My name is MiniCPM", return_tensors="pt")
+
+ >>> # Prepare a cache class and pass it to the model's forward
+ >>> past_key_values = DynamicCacheQKV()
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
+ >>> outputs.past_key_values # access cache filled with key/values from generation
+ DynamicCacheQKV()
+ ```
+ """
+ def __init__(self, num_hidden_layers: Optional[int] = None) -> None:
+ super().__init__()
+ if num_hidden_layers is None:
+ self.key_cache: List[torch.Tensor] = []
+ self.value_cache: List[torch.Tensor] = []
+ self.compress_k_cache: List[torch.Tensor] = []
+ self.no_compress_k_cache: List[torch.Tensor] = []
+ self.cached_compressed_cu_seqlens: List[torch.Tensor] = []
+ self.no_rope_key_cache: List[torch.Tensor] = []
+ else:
+ self.key_cache: List[torch.Tensor] = [[] for _ in range(num_hidden_layers)]
+ self.value_cache: List[torch.Tensor] = [[] for _ in range(num_hidden_layers)]
+ self.compress_k_cache: List[torch.Tensor] = [[] for _ in range(num_hidden_layers)]
+ self.no_compress_k_cache: List[torch.Tensor] = [[] for _ in range(num_hidden_layers)]
+ self.cached_compressed_cu_seqlens: List[torch.Tensor] = [[] for _ in range(num_hidden_layers)]
+ self.no_rope_key_cache: List[torch.Tensor] = [[] for _ in range(num_hidden_layers)]
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
+
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
+ """
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
+ sequence length.
+ """
+ if layer_idx < len(self):
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx])
+ else:
+ raise KeyError(f'Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}')
+
+ def __iter__(self):
+ """
+ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
+ keys and values
+ """
+ for layer_idx in range(len(self)):
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx])
+
+ def __len__(self):
+ """
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
+ to the number of layers in the model.
+ """
+ return len(self.key_cache)
+
+ def update(
+ self,
+ key_states: torch.Tensor,
+ value_states: torch.Tensor,
+ layer_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
+
+ Parameters:
+ key_states (`torch.Tensor`):
+ The new key states to cache.
+ value_states (`torch.Tensor`):
+ The new value states to cache.
+ layer_idx (`int`):
+ The index of the layer to cache the states for.
+ cache_kwargs (`Dict[str, Any]`, `optional`):
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
+
+ Return:
+ A tuple containing the updated key and value states.
+ """
+ # Update the number of seen tokens
+ if layer_idx == 0:
+ self._seen_tokens += key_states.shape[-2]
+
+ # Update the cache
+ if len(self.key_cache) <= layer_idx:
+ self.key_cache.append(key_states)
+ self.value_cache.append(value_states)
+
+ # content on layer cache can be a tensor and checking not tensor causes errors
+ # so we explicitly check for the empty list
+ elif self.key_cache[layer_idx] == []:
+ self.key_cache[layer_idx] = key_states
+ self.value_cache[layer_idx] = value_states
+
+ else:
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
+
+ def update_no_rope_key(
+ self,
+ key_states: torch.Tensor,
+ layer_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None):
+
+ # Update the cache
+ if len(self.no_rope_key_cache) <= layer_idx:
+ self.no_rope_key_cache.append(key_states)
+
+ # content on layer cache can be a tensor and checking not tensor causes errors
+ # so we explicitly check for the empty list
+ elif self.no_rope_key_cache[layer_idx] == []:
+ self.no_rope_key_cache[layer_idx] = key_states
+ else:
+ self.no_rope_key_cache[layer_idx] = torch.cat([self.no_rope_key_cache[layer_idx], key_states], dim=1)
+ return self.no_rope_key_cache[layer_idx]
+
+ def update_compress_k(
+ self,
+ key_states: torch.Tensor,
+ layer_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None
+ ) -> torch.Tensor:
+ """
+ Updates the compressed key cache with the new `key_states` for the layer `layer_idx`.
+
+ Parameters:
+ key_states (`torch.Tensor`):
+ The new compressed key states to cache.
+ layer_idx (`int`):
+ The index of the layer to cache the states for.
+ cache_kwargs (`Dict[str, Any]`, `optional`):
+ Additional arguments for the cache subclass. Not used here.
+
+ Return:
+ The updated compressed key states for the layer.
+ """
+
+ # Update the cache
+ if len(self.compress_k_cache) <= layer_idx:
+ self.compress_k_cache.append(key_states)
+
+ # content on layer cache can be a tensor and checking not tensor causes errors
+ # so we explicitly check for the empty list
+ elif self.compress_k_cache[layer_idx] == []:
+ self.compress_k_cache[layer_idx] = key_states
+ else:
+ self.compress_k_cache[layer_idx] = torch.cat([self.compress_k_cache[layer_idx], key_states], dim=0)
+ return self.compress_k_cache[layer_idx]
+
+ def update_no_compress_k(
+ self,
+ key_states: torch.Tensor,
+ layer_idx: int,
+ kernel_size: int = 32,
+ kernel_stride: int = 16,
+ cache_kwargs: Optional[Dict[str, Any]] = None
+ ) -> Optional[torch.Tensor]:
+ """
+ Buffers not-yet-compressed key states for the layer `layer_idx` and emits a chunk once enough keys have accumulated.
+
+ Parameters:
+ key_states (`torch.Tensor`):
+ The new key states to buffer.
+ layer_idx (`int`):
+ The index of the layer to cache the states for.
+ kernel_size (`int`, *optional*, defaults to 32):
+ Number of buffered keys emitted per chunk.
+ kernel_stride (`int`, *optional*, defaults to 16):
+ Number of buffered keys dropped after a chunk is emitted, so consecutive chunks overlap.
+ cache_kwargs (`Dict[str, Any]`, `optional`):
+ Additional arguments for the cache subclass. Not used here.
+
+ Return:
+ A chunk of `kernel_size` key states ready for compression, or `None` if fewer than `kernel_size` keys are buffered.
+ """
+ # Update the cache
+ if len(self.no_compress_k_cache) <= layer_idx:
+ self.no_compress_k_cache.append(key_states)
+
+ # content on layer cache can be a tensor and checking not tensor causes errors
+ # so we explicitly check for the empty list
+ elif self.no_compress_k_cache[layer_idx] == []:
+ self.no_compress_k_cache[layer_idx] = key_states
+ else:
+ self.no_compress_k_cache[layer_idx] = torch.cat([self.no_compress_k_cache[layer_idx], key_states], dim=0)
+
+ current_len = self.no_compress_k_cache[layer_idx].shape[0]
+
+ if current_len >= kernel_size:
+ k_chunk = self.no_compress_k_cache[layer_idx][:kernel_size]
+ self.no_compress_k_cache[layer_idx] = self.no_compress_k_cache[layer_idx][kernel_stride:]
+ return k_chunk
+ else:
+ return None
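+ # Illustration of the sliding window above (illustrative values only): with kernel_size=32 and
+ # kernel_stride=16, keys accumulate along dim 0 until at least 32 rows are buffered; the first 32
+ # rows are returned as k_chunk and the first 16 rows are then dropped, so the next emitted chunk
+ # overlaps the previous one by 16 positions. Until then, the method returns None.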
+
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
+ # TODO: deprecate this function in favor of `cache_position`
+ if len(self.key_cache) <= layer_idx or (len(self.key_cache) > layer_idx and self.key_cache[layer_idx] == []):
+ return 0
+ return self.key_cache[layer_idx].shape[-2]
+
+ def get_max_length(self) -> Optional[int]:
+ """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
+ return None
+
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
+ """Converts the `DynamicCache` instance into the its equivalent in the legacy cache format. Used for
+ backward compatibility."""
+ legacy_cache = ()
+ for layer_idx in range(len(self)):
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),)
+ return legacy_cache
+
+ # @classmethod
+ # def from_legacy_cache(
+ # cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_hidden_layers: int = None
+ # ) -> "DynamicCacheQKV":
+ # """Converts a cache in the legacy cache format into an equivalent `DynamicCache`. Used for
+ # backward compatibility."""
+ # cache = cls(num_hidden_layers)
+ # if past_key_values is not None:
+ # for layer_idx in range(len(past_key_values)):
+ # key_states, value_states, query_status = past_key_values[layer_idx]
+ # cache.update(key_states, value_states, query_status,layer_idx)
+ # return cache
+
+ def crop(self, max_length: int):
+ """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
+ negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search."""
+ # In case it is negative
+ if max_length < 0:
+ max_length = self.get_seq_length() - abs(max_length)
+
+ if self.get_seq_length() <= max_length:
+ return
+
+ self._seen_tokens = max_length
+ for idx in range(len(self.key_cache)):
+ if self.key_cache[idx] != []:
+ self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
+ self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
+
+ def batch_split(self, full_batch_size: int, split_size: int, num_hidden_layers: int) -> List['DynamicCacheQKV']:
+ """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
+ `_split_model_inputs()` in `generation.utils`"""
+ out = []
+ for i in range(0, full_batch_size, split_size):
+ current_split = DynamicCacheQKV(num_hidden_layers)
+ current_split._seen_tokens = self._seen_tokens
+ current_split.key_cache = [tensor[i: i + split_size] for tensor in self.key_cache]
+ current_split.value_cache = [tensor[i: i + split_size] for tensor in self.value_cache]
+ out.append(current_split)
+ return out
+
+ @classmethod
+ def from_batch_splits(cls, splits: List['DynamicCacheQKV'], num_hidden_layers: int) -> 'DynamicCacheQKV':
+ """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in
+ `generation.utils`"""
+ cache = cls(num_hidden_layers)
+ for idx in range(len(splits[0])):
+ key_cache = [current.key_cache[idx] for current in splits if current.key_cache[idx] != []]
+ value_cache = [current.value_cache[idx] for current in splits if current.value_cache[idx] != []]
+ if key_cache != []:
+ layer_keys = torch.cat(key_cache, dim=0)
+ layer_values = torch.cat(value_cache, dim=0)
+ cache.update(layer_keys, layer_values, idx)
+ return cache
+
+ def batch_repeat_interleave(self, repeats: int):
+ """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search."""
+ for layer_idx in range(len(self)):
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
+
+ def batch_select_indices(self, indices: torch.Tensor):
+ """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search."""
+ for layer_idx in range(len(self)):
+ self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
+ self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
+
+
+# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
+# It means that the function will not be traced through and simply appear as a node in the graph.
+if is_torch_fx_available():
+ if not is_torch_greater_or_equal_than_1_13:
+ import torch.fx
+
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = 'MiniCPMConfig'
+
+
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
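+ # A small worked example of the unpadding metadata above (illustrative values only):
+ #   attention_mask = [[1, 1, 1, 0],
+ #                     [1, 1, 0, 0]]
+ #   -> indices = [0, 1, 2, 4, 5]   (positions of the non-padding tokens in the flattened mask)
+ #   -> cu_seqlens = [0, 3, 5]      (cumulative per-sample sequence lengths, prepended with 0)
+ #   -> max_seqlen_in_batch = 3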
+
+
+def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+ warnings.warn(
+ 'Calling `transformers.models.minicpm.modeling_minicpm._expand_mask` is deprecated and will be removed in v4.37. Use `transformers.modeling_attn_mask_utils._prepare_4d_attention_mask` instead.'
+ )
+ return _prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
+
+
+def _make_causal_mask(
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+):
+ warnings.warn(
+ 'Calling `transformers.models.minicpm.modeling_minicpm._make_causal_mask` is deprecated and will be removed in v4.37. Use `transformers.models.minicpm.modeling_minicpm.AttentionMaskConverter._make_causal_mask` instead.'
+ )
+ return AttentionMaskConverter._make_causal_mask(
+ input_ids_shape=input_ids_shape, dtype=dtype, device=device, past_key_values_length=past_key_values_length
+ )
+
+
+# @torch.jit.script # type: ignore
+def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
+ old_dtype = hidden.dtype
+ variance = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
+ hidden = (hidden * torch.rsqrt(variance + eps)).to(old_dtype)
+ return hidden * weight
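+ # The helper above computes, in float32 for numerical stability:
+ #   y = x * rsqrt(mean(x^2, dim=-1) + eps) * weight
+ # i.e. RMSNorm without mean subtraction or bias, cast back to the input dtype.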
+
+
+class MiniCPMRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ MiniCPMRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ return rms_layernorm(hidden_states, self.weight, self.variance_epsilon)
+
+
+ALL_LAYERNORM_LAYERS.append(MiniCPMRMSNorm)
+
+
+class MiniCPMRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ # seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.float32
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+class MiniCPMLongRoPE(MiniCPMRotaryEmbedding):
+ """MiniCPMRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, short_factor=None, long_factor=None, original_max_position_embeddings=None):
+ self.short_factor = short_factor
+ self.long_factor = long_factor
+ self.original_max_position_embeddings = original_max_position_embeddings
+ scale = (max_position_embeddings / self.original_max_position_embeddings)
+ self.scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+ if seq_len > self.original_max_position_embeddings:
+ ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=device)
+ else:
+ ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=device)
+
+ freqs = torch.mul(
+ torch.outer(t, 1.0 / ext_factors).to(device=device),
+ self.inv_freq.to(device=device).to(dtype)
+ )
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer('cos_cached', emb.cos().to(dtype) * self.scaling_factor, persistent=False)
+ self.register_buffer('sin_cached', emb.sin().to(dtype) * self.scaling_factor, persistent=False)
+
+
+class MiniCPMLinearScalingRotaryEmbedding(MiniCPMRotaryEmbedding):
+ """MiniCPMRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+ t = t / self.scaling_factor
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
+
+
+class MiniCPMDynamicNTKScalingRotaryEmbedding(MiniCPMRotaryEmbedding):
+ """MiniCPMRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+
+ if seq_len > self.max_position_embeddings:
+ base = self.base * (
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+ ) ** (self.dim / (self.dim - 2))
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
+
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2:]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offsetted position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ # cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ # sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ # q_embed = (q * cos) + (rotate_half(q) * sin)
+ # k_embed = (k * cos) + (rotate_half(k) * sin)
+ orig_dtype = k.dtype
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim) # [bs, 1, seq_len, dim]
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim) # [bs, 1, seq_len, dim]
+ q_fp32 = q.to(dtype=torch.float32, device=q.device)
+ k_fp32 = k.to(dtype=torch.float32, device=k.device)
+ q_embed = (q_fp32 * cos) + (rotate_half(q_fp32) * sin)
+ k_embed = (k_fp32 * cos) + (rotate_half(k_fp32) * sin)
+ return q_embed.to(dtype=orig_dtype), k_embed.to(dtype=orig_dtype)
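+ # Shape sketch for the RoPE application above (illustrative values only): with q of shape
+ # (1, 16, 8, 64), k of shape (1, 2, 8, 64) and position_ids of shape (1, 8),
+ # cos[position_ids].unsqueeze(1) and sin[position_ids].unsqueeze(1) have shape (1, 1, 8, 64)
+ # and broadcast over the head dimension; the rotation is done in float32 and cast back.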
+
+
+class MiniCPMMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, x):
+ if self.config.pretraining_tp > 1:
+ slice = self.intermediate_size // self.config.pretraining_tp
+ gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
+ up_proj_slices = self.up_proj.weight.split(slice, dim=0)
+ down_proj_slices = self.down_proj.weight.split(slice, dim=1)
+
+ gate_proj = torch.cat(
+ [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
+ )
+ up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
+
+ intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
+ down_proj = [
+ F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
+ ]
+ down_proj = sum(down_proj)
+ else:
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+ return down_proj
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
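+ # Example of the GQA expansion above (illustrative values only): with 2 KV heads, 16 query heads
+ # and n_rep = 16 // 2 = 8, a tensor of shape (2, 2, 128, 64) becomes (2, 16, 128, 64), so each
+ # key/value head is shared by 8 query heads.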
+
+
+class MiniCPMAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: MiniCPMConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f'Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will '
+ 'lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` '
+ 'when creating this class.'
+ )
+
+ self.attention_dropout = config.attention_dropout
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
+ f' and `num_heads`: {self.num_heads}).'
+ )
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
+ self._init_rope()
+
+ def _init_rope(self):
+ if self.config.rope_scaling is None:
+ self.rotary_emb = MiniCPMRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+ else:
+ scaling_type = self.config.rope_scaling['rope_type']
+ scaling_factor = self.config.rope_scaling.get('factor', None)
+ if scaling_type == 'linear':
+ self.rotary_emb = MiniCPMLinearScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ elif scaling_type == 'dynamic':
+ self.rotary_emb = MiniCPMDynamicNTKScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ elif scaling_type == 'longrope':
+ self.rotary_emb = MiniCPMLongRoPE(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ short_factor=self.config.rope_scaling['short_factor'],
+ long_factor=self.config.rope_scaling['long_factor'],
+ base=self.rope_theta,
+ original_max_position_embeddings=self.config.rope_scaling['original_max_position_embeddings']
+ )
+ else:
+ raise ValueError(f'Unknown RoPE scaling type {scaling_type}')
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if 'padding_mask' in kwargs:
+ warnings.warn(
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ if self.config.pretraining_tp > 1:
+ key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
+ query_slices = self.q_proj.weight.split(
+ (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
+ )
+ key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
+ value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
+
+ query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
+ query_states = torch.cat(query_states, dim=-1)
+
+ key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
+ key_states = torch.cat(key_states, dim=-1)
+
+ value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
+ value_states = torch.cat(value_states, dim=-1)
+
+ else:
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
+ 'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
+ 'with a layer index.'
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
+ f' {attn_weights.size()}'
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
+ )
+ attn_weights = attn_weights + attention_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
+ f' {attn_output.size()}'
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ if self.config.pretraining_tp > 1:
+ attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
+ o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
+ attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
+ else:
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+class MiniCPMFlashAttention2(MiniCPMAttention):
+ """
+ MiniCPM flash attention module. This module inherits from `MiniCPMAttention` as the weights of the module stay
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # MiniCPMFlashAttention2 attention does not support output_attentions
+ if 'padding_mask' in kwargs:
+ warnings.warn(
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
+ )
+
+ # overwrite attention_mask with padding_mask
+ attention_mask = kwargs.pop('padding_mask')
+
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x num_heads x head_dim
+ # therefore we just need to keep the original shape
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+        # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.attention_dropout if self.training else 0.0
+
+        # In PEFT, we usually cast the layer norms to float32 for training stability,
+        # so the input hidden states may get silently cast to float32. We therefore
+        # cast them back to the correct dtype to make sure everything works as expected.
+        # This can slow down training & inference, so it is recommended not to cast the
+        # LayerNorms to fp32. (MiniCPMRMSNorm handles this correctly.)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ # Handle the case where the model is quantized
+ if hasattr(self.config, '_pre_quantization_dtype'):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+                f'The input hidden states seem to have been silently cast to float32; this might be because'
+                f' you have upcasted embedding or layer norm layers to float32. We will cast the input back to'
+                f' {target_dtype}.'
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        first unpads the input, then computes the attention scores, and finally pads the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+                Attention dropout probability
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in MiniCPMFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
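+        # Toy illustration of the unpadding bookkeeping above (a sketch, not model code):
+        # for attention_mask [[1, 1, 1, 0], [1, 1, 0, 0]], `_get_unpad_data` keeps the
+        # five real tokens, yielding cu_seqlens_k = [0, 3, 5] and
+        # max_seqlen_in_batch_k = 3 -- exactly the varlen layout that
+        # `flash_attn_varlen_func` consumes.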
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class MiniCPMInfLLMv2Attention(MiniCPMAttention):
+ """
+    MiniCPM InfLLMv2 sparse attention module. This module inherits from `MiniCPMAttention`, as the weights of the
+    module stay untouched. The forward pass routes long inputs (kv_seq_len >= `dense_len`) through block-sparse
+    attention built on top of the flash attention API, falling back to dense flash attention for shorter inputs.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ assert self.config._attn_implementation == 'flash_attention_2', 'Only flash_attention_2 is supported for sparse attention'
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ # -------sparse-------
+ self.kernel_size = self.config.sparse_config.get('kernel_size', 32)
+ self.kernel_stride = self.config.sparse_config.get('kernel_stride', 16)
+ self.init_blocks = self.config.sparse_config.get('init_blocks', 1)
+ self.block_size = self.config.sparse_config.get('block_size', 64)
+ self.window_size = self.config.sparse_config.get('window_size', 2048)
+ self.dense_len = self.config.sparse_config.get('dense_len', 8192)
+
+        self.local_blocks = self.window_size // self.block_size  # number of local (sliding-window) blocks
+ self.topk = self.config.sparse_config.get('topk', 64)
+ self.use_nope = self.config.sparse_config.get('use_nope', False)
+ self.compress_k = CompressK(self.num_key_value_heads, self.head_dim, kernel_size=self.kernel_size, kernel_stride=self.kernel_stride)
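+        # Illustrative `sparse_config` entry for config.json (hypothetical values that
+        # simply mirror the defaults read via `.get()` above):
+        #   "sparse_config": {
+        #       "kernel_size": 32, "kernel_stride": 16, "init_blocks": 1,
+        #       "block_size": 64, "window_size": 2048, "dense_len": 8192,
+        #       "topk": 64, "use_nope": false
+        #   }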
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+        # MiniCPMInfLLMv2Attention does not support output_attentions
+ if 'padding_mask' in kwargs:
+ warnings.warn(
+                'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
+ )
+
+ # overwrite attention_mask with padding_mask
+ attention_mask = kwargs.pop('padding_mask')
+
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+ assert bsz == 1, 'Only batch_size=1 is supported at the moment.'
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+        # Save the pre-RoPE ("NoPE") query/key projections for the sparse block-selection stage
+ if self.use_nope:
+ query_states_no_rope = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
+ key_states_no_rope = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim)
+
+        # Flash attention requires the input to have the shape
+        # batch_size x seq_length x num_heads x head_dim;
+        # the transposed layout below is only used for RoPE and the KV cache.
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+        # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+ if self.use_nope:
+ no_rope_param = {
+ 'key_states_no_rope': key_states_no_rope,
+ 'query_states_no_rope': query_states_no_rope,
+ }
+ if kv_seq_len <= self.dense_len:
+ past_key_value.update_no_rope_key(key_states_no_rope, self.layer_idx)
+ else:
+ no_rope_param = None
+
+ dropout_rate = self.attention_dropout if self.training else 0.0
+
+        # In PEFT, we usually cast the layer norms to float32 for training stability,
+        # so the input hidden states may get silently cast to float32. We therefore
+        # cast them back to the correct dtype to make sure everything works as expected.
+        # This can slow down training & inference, so it is recommended not to cast the
+        # LayerNorms to fp32. (MiniCPMRMSNorm handles this correctly.)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ # Handle the case where the model is quantized
+ if hasattr(self.config, '_pre_quantization_dtype'):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+                f'The input hidden states seem to have been silently cast to float32; this might be because'
+                f' you have upcasted embedding or layer norm layers to float32. We will cast the input back to'
+                f' {target_dtype}.'
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+ if kv_seq_len < self.dense_len:
+ attn_output = self._flash_attention_forward_dense(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate)
+ elif past_key_value is None or q_len != 1: # prefilling
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate,
+                no_rope_param=no_rope_param,
+ past_key_value=past_key_value)
+ else:
+ attn_output = self._flash_attention_forward_with_kv_cache(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, no_rope_param=no_rope_param, past_key_value=past_key_value)
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, no_rope_param=None, past_key_value=None
+ ):
+ """
+        Calls the sparse attention path - the input is first unpadded, block-sparse attention is computed over the
+        unpadded sequences, and the attention output is padded back. An attention mask is required.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+                Attention dropout probability
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in MiniCPMFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+ if no_rope_param is not None:
+ # nope unpad
+ no_rope_param['query_states_no_rope'] = no_rope_param['query_states_no_rope'].squeeze(0)
+ no_rope_param['key_states_no_rope'] = no_rope_param['key_states_no_rope'].squeeze(0)
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+ attn_output_unpad = self.sparse_forward(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q,
+ cu_seqlens_k,
+ max_seqlen_in_batch_q,
+ max_seqlen_in_batch_k,
+ no_rope_param=no_rope_param,
+ past_key_value=past_key_value,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+            raise ValueError('An attention mask is required for the sparse attention path')
+
+ return attn_output
+
+ def _flash_attention_forward_with_kv_cache(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, no_rope_param=None, past_key_value=None
+ ):
+ """
+        Decode-time variant of `_flash_attention_forward`: splits the cached keys/values from the single new token
+        and runs the sparse attention kernel against the KV cache.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+                Attention dropout probability
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in MiniCPMFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+
+ batch_size = query_states.shape[0]
+
+ # query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ # query_states, key_states, value_states, attention_mask, query_length=query_length
+ # )
+
+ assert batch_size == 1, 'Only batch_size=1 is supported at the moment.'
+            # prepare past kv / new kv
+ new_q = query_states
+
+ new_k = key_states[:, -1:, :, :].contiguous()
+ new_v = value_states[:, -1:, :, :].contiguous()
+
+ past_k = key_states[:, :-1, :, :].contiguous()
+ past_v = value_states[:, :-1, :, :].contiguous()
+ if no_rope_param is not None:
+ # nope unpad
+ no_rope_param['query_states_no_rope'] = no_rope_param['query_states_no_rope'].squeeze(0)
+ no_rope_param['key_states_no_rope'] = no_rope_param['key_states_no_rope'].squeeze(0)
+
+ attn_output = self.sparse_forward_with_kv_cache(
+ past_k=past_k, past_v=past_v, new_k=new_k, new_v=new_v, new_q=new_q, batch_size=batch_size, no_rope_param=no_rope_param, past_key_value=past_key_value)
+
+ # attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+            raise ValueError('An attention mask is required for the sparse attention path')
+
+ return attn_output
+
+ def sparse_forward(self,
+ query_layer,
+ key_layer,
+ value_layer,
+ cu_seqlens_q,
+ cu_seqlens_k,
+ max_seqlen_in_batch_q,
+ max_seqlen_in_batch_k,
+ no_rope_param=None,
+ past_key_value=None):
+ stage1_k = key_layer if no_rope_param is None else no_rope_param['key_states_no_rope']
+ compressed_k, compressed_cu_seqlens = self.compress_k(stage1_k, cu_seqlens_k)
+ compressed_v = compressed_k.clone()
+ if past_key_value is not None:
+            # Compute the start index of keys (k) that were not compressed; only batch_size=1 is supported at the moment.
+ no_compress_k_start = compressed_k.shape[0] * self.kernel_stride
+ past_key_value.update_compress_k(
+ compressed_k, self.layer_idx
+ )
+ past_key_value.update_no_compress_k(
+ key_layer[no_compress_k_start:], self.layer_idx, no_compress_k_start)
+ past_key_value.cached_compressed_cu_seqlens.append(compressed_cu_seqlens)
+ compressed_seqlens = compressed_cu_seqlens[1:] - compressed_cu_seqlens[:-1]
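+        # Block selection (a sketch of the contract assumed here): `compressed_attention`
+        # scores queries against the mean-pooled compressed keys and returns, per query,
+        # the indices of the `topk` key blocks to attend to, always keeping `init_blocks`
+        # sink blocks and `local_blocks` sliding-window blocks.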
+ topk_idx = compressed_attention(
+ query_layer if no_rope_param is None else no_rope_param['query_states_no_rope'],
+ compressed_k,
+ compressed_v,
+ self.kernel_size,
+ self.kernel_stride,
+ self.block_size,
+ self.topk,
+ cu_seqlens_q,
+ compressed_cu_seqlens,
+ max_seqlen_in_batch_q,
+ compressed_seqlens.max().item(),
+ None,
+ init_blocks=self.init_blocks,
+ local_blocks=self.local_blocks,
+ )
+
+ topk_attn_output = infllmv2_attn_varlen_func(
+ query_layer,
+ key_layer,
+ value_layer,
+ cu_seqlens_q,
+ cu_seqlens_k,
+ max_seqlen_in_batch_q,
+ max_seqlen_in_batch_k,
+ dropout_p=0.0,
+ deterministic=False,
+ softmax_scale=None,
+ causal=True,
+ return_attn_probs=False,
+ block_window_size=self.window_size // self.block_size,
+ topk_idx=topk_idx
+ )
+
+ return topk_attn_output
+
+ def sparse_forward_with_kv_cache(self, past_k=None, past_v=None, new_k=None, new_v=None, new_q=None, batch_size=None, no_rope_param=None, past_key_value=None):
+
+ # stage1_k = new_k.squeeze(0) if no_rope_param is None else no_rope_param['key_states_no_rope']
+        if past_k.shape[1] + new_k.shape[1] == self.dense_len and (
+            past_key_value.compress_k_cache == []
+            or len(past_key_value.compress_k_cache) < self.layer_idx + 1
+            or past_key_value.compress_k_cache[self.layer_idx] == []
+        ):
+            if no_rope_param is not None:
+                stage1_k = past_key_value.no_rope_key_cache[self.layer_idx].squeeze(0).contiguous()  # batch_size == 1 only
+            else:
+                stage1_k = torch.cat([past_k, new_k], dim=1).contiguous().squeeze(0).contiguous()  # batch_size == 1 only
+            compressed_k, compressed_cu_seqlens = self.compress_k(
+                stage1_k, torch.tensor([0, stage1_k.shape[0]], device=stage1_k.device, dtype=torch.int32)
+            )  # batch_size == 1 only
+
+            # Compute the start index of keys (k) that were not compressed; only batch_size=1 is supported at the moment.
+ no_compress_k_start = compressed_k.shape[0] * self.kernel_stride
+ past_key_value.update_compress_k(
+ compressed_k, self.layer_idx
+ )
+ past_key_value.update_no_compress_k(
+ stage1_k[no_compress_k_start:], self.layer_idx, no_compress_k_start)
+ past_key_value.cached_compressed_cu_seqlens.append(compressed_cu_seqlens)
+
+ else:
+ stage1_k = new_k.squeeze(0) if no_rope_param is None else no_rope_param['key_states_no_rope']
+ no_compress_k = past_key_value.update_no_compress_k(
+ stage1_k, self.layer_idx, kernel_stride=self.kernel_stride, kernel_size=self.kernel_size)
+ if no_compress_k is not None:
+ compressed_k = no_compress_k.mean(dim=0, keepdim=True) # [1, n_heads_k, head_dim]
+
+ compressed_k = past_key_value.update_compress_k(
+ compressed_k, self.layer_idx) # [seqlen, nheads_k, head_dim]
+
+ past_key_value.cached_compressed_cu_seqlens[self.layer_idx][-1] += 1 # !Increment the last entry in sequence lengths by 1; currently supports only batch_size = 1
+ compressed_cu_seqlens = past_key_value.cached_compressed_cu_seqlens[self.layer_idx]
+ else:
+ compressed_k = past_key_value.compress_k_cache[self.layer_idx] # [seqlen, nheads_k, head_dim]
+ compressed_cu_seqlens = past_key_value.cached_compressed_cu_seqlens[self.layer_idx]
+
+ compressed_v = compressed_k.clone()
+
+ compressed_seqlens = compressed_cu_seqlens[1:] - compressed_cu_seqlens[:-1]
+ torch.cuda.synchronize()
+ # Manually verify that the lengths match
+ assert compressed_k.shape[0] == compressed_seqlens.sum().item(), 'The length of compressed_k does not match the sum of compressed_seqlens'
+ topk_idx = compressed_attention(
+ new_q.squeeze(0).contiguous() if no_rope_param is None else no_rope_param['query_states_no_rope'],
+ compressed_k,
+ compressed_v,
+ self.kernel_size,
+ self.kernel_stride,
+ self.block_size,
+ self.topk,
+ torch.tensor([0, 1], device=compressed_k.device, dtype=torch.int32),
+ compressed_cu_seqlens,
+ 1,
+ compressed_seqlens.max().item(),
+ None,
+ init_blocks=self.init_blocks,
+ local_blocks=self.local_blocks,
+ total_seq_lens=past_k.shape[1] + 1, # !Only batch_size=1 is supported at the moment.
+ )
+
+
+ cache_batch_idx = torch.arange(batch_size, device=new_q.device, dtype=torch.int32)
+
+ seqlen_k = past_k.shape[1] + new_k.shape[1] # !Only batch_size=1 is supported at the moment.
+ seqlens_k = torch.full((batch_size,), seqlen_k - 1, dtype=torch.int32, device=new_q.device)
+
+ past_k = torch.cat([past_k, torch.zeros_like(new_k, dtype=new_k.dtype)], dim=1).contiguous() # Append one zero vector to avoid potential out-of-bounds access
+ past_v = torch.cat([past_v, torch.zeros_like(new_v, dtype=new_v.dtype)], dim=1).contiguous() # Append one zero vector to avoid potential out-of-bounds access
+ topk_attn_output = infllmv2_attn_with_kvcache(
+ q=new_q,
+ k_cache=past_k,
+ v_cache=past_v,
+ topk_idx=topk_idx,
+ block_window_size=self.window_size // self.block_size,
+ k=new_k, # [batch_size, 1, nheads_k, d]
+ v=new_v, # [batch_size, 1, nheads_k, d]
+ cache_seqlens=seqlens_k, # current_seqlens_k-1
+ rotary_cos=None, # No rotary embeddings
+ rotary_sin=None, # No rotary embeddings
+ cache_batch_idx=cache_batch_idx,
+            causal=False,  # single-token decode: the new query may attend to every cached position
+ )
+ return topk_attn_output
+
+ def _flash_attention_forward_dense(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        first unpads the input, then computes the attention scores, and finally pads the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`, *optional*):
+                Attention dropout probability
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in MiniCPMFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class MiniCPMSdpaAttention(MiniCPMAttention):
+ """
+ MiniCPM attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+    `MiniCPMAttention`, as the weights of the module stay untouched. The only changes are in the forward pass, to
+    adapt to the SDPA API.
+ """
+
+ # Adapted from MiniCPMAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ 'MiniCPMModel is using MiniCPMSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, '
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
+ )
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == 'cuda' and attention_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+MINICPM_ATTENTION_CLASSES = {
+ 'eager': MiniCPMAttention,
+ 'flash_attention_2': MiniCPMFlashAttention2,
+ 'sdpa': MiniCPMSdpaAttention,
+}
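+# Sketch of how this mapping is consumed: the decoder layer below resolves its attention
+# class from `config._attn_implementation`, e.g.
+#   MINICPM_ATTENTION_CLASSES['sdpa']  # -> MiniCPMSdpaAttention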
+
+
+class MiniCPMDecoderLayer(nn.Module):
+ def __init__(self, config: MiniCPMConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ if config.sparse_config is not None and torch.cuda.is_available():
+            raise NotImplementedError('MiniCPM4-0.5B does not support sparse attention yet.')
+ else:
+ self.self_attn = MINICPM_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
+
+ self.mlp = MiniCPMMLP(config)
+ self.input_layernorm = MiniCPMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = MiniCPMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ self.scale_depth = config.scale_depth
+ self.num_hidden_layers = config.num_hidden_layers
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ **kwargs,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*):
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
+ query_sequence_length, key_sequence_length)` if default attention is used.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+ if 'padding_mask' in kwargs:
+ warnings.warn(
+                'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
+ )
+
+ residual = hidden_states
+ hidden_states = self.input_layernorm(hidden_states)
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ **kwargs,
+ )
+
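+        # Each residual branch is scaled by scale_depth / sqrt(num_hidden_layers)
+        # (MiniCPM's depth-scaled residual scheme) rather than added verbatim; the MLP
+        # branch below is scaled the same way.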
+ hidden_states = residual + hidden_states * (self.scale_depth / math.sqrt(self.num_hidden_layers))
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states * (self.scale_depth / math.sqrt(self.num_hidden_layers))
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+MINICPM_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads, etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MiniCPMConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ 'The bare MiniCPM Model outputting raw hidden-states without any specific head on top.',
+ MINICPM_START_DOCSTRING,
+)
+class MiniCPMPreTrainedModel(PreTrainedModel):
+ config_class = MiniCPMConfig
+ base_model_prefix = 'model'
+ supports_gradient_checkpointing = True
+ _no_split_modules = ['MiniCPMDecoderLayer']
+ _skip_keys_device_placement = 'past_key_values'
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+MINICPM_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance;
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ 'The bare MiniCPM Model outputting raw hidden-states without any specific head on top.',
+ MINICPM_START_DOCSTRING,
+)
+class MiniCPMModel(MiniCPMPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MiniCPMDecoderLayer`]
+
+ Args:
+ config: MiniCPMConfig
+ """
+
+ def __init__(self, config: MiniCPMConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [MiniCPMDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self._use_sdpa = config._attn_implementation == 'sdpa'
+ self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2'
+
+ self.norm = MiniCPMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(MINICPM_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape[:2]
+ elif inputs_embeds is not None:
+ batch_size, seq_length = inputs_embeds.shape[:2]
+ else:
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
+ )
+ use_cache = False
+
+ past_key_values_length = 0
+
+ if use_cache:
+            use_legacy_cache = not isinstance(past_key_values, Cache)
+            if use_legacy_cache:
+                if past_key_values is not None:
+                    raise ValueError(
+                        'You must use the new `past_key_values` format, such as a `Cache` instance, instead of the legacy tuple format.'
+                    )
+                # `None` simply means "start a fresh cache": from_legacy_cache(None) returns an empty DynamicCache.
+                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
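+            # The sparse (InfLLMv2) path keeps extra per-layer state such as compressed
+            # and no-RoPE keys, so prefill swaps in a DynamicCacheQKV in place of the
+            # plain DynamicCache.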
+ if self.config.sparse_config is not None and torch.cuda.is_available() and past_key_values_length == 0:
+ past_key_values = DynamicCacheQKV()
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if inputs_embeds is None:
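+            # Token embeddings are multiplied by `scale_emb`, the input-side piece of
+            # MiniCPM's scaling scheme (see also the depth-scaled residuals in the
+            # decoder layers and the logit scaling in MiniCPMForCausalLM.forward).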
+ inputs_embeds = self.embed_tokens(input_ids) * self.config.scale_emb
+
+ if self._use_flash_attention_2:
+ # 2d mask is passed through the layers
+ # attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ if attention_mask is None:
+ raise ValueError(
+                    f'An attention_mask is required for flash attention, but got {attention_mask}.'
+ )
+ elif self._use_sdpa and not output_attentions:
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
+ # the manual implementation that requires a 4D causal mask in all cases.
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ )
+ else:
+ # 4d mask is passed through the layers
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+ )
+
+ # embed positions
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+class MiniCPMForCausalLM(MiniCPMPreTrainedModel):
+ _tied_weights_keys = ['lm_head.weight']
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = MiniCPMModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(MINICPM_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ logits_to_keep: Union[int, torch.Tensor] = 0,
+ **kwargs,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MiniCPMForCausalLM
+
+ >>> model = MiniCPMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+ hidden_states = hidden_states[:, slice_indices, :].contiguous()
+ if self.config.pretraining_tp > 1:
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
+ logits = torch.cat(logits, dim=-1)
+ else:
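+            # Output-side counterpart of `scale_emb`: hidden states are divided by
+            # hidden_size / dim_model_base before the LM head.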
+ logits = self.lm_head(hidden_states / (self.config.hidden_size / self.config.dim_model_base))
+ logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = None # past_key_values.get_max_length()
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get('position_ids', None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1]:]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {'inputs_embeds': inputs_embeds}
+ else:
+ model_inputs = {'input_ids': input_ids}
+
+ model_inputs.update(
+ {
+ 'position_ids': position_ids,
+ 'past_key_values': past_key_values,
+ 'use_cache': kwargs.get('use_cache'),
+ 'attention_mask': attention_mask,
+ }
+ )
+ # Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
+ for key, value in kwargs.items():
+ if key not in model_inputs:
+ model_inputs[key] = value
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+ @torch.inference_mode()
+ def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = 'user',
+ max_length: int = 4096, num_beams=1, do_sample=True, top_p=0.8, temperature=0.3, logits_processor=None,
+ **kwargs):
+ if history is None:
+ history = []
+        gen_kwargs = {
+            'max_length': max_length,
+            'num_beams': num_beams,
+            'do_sample': do_sample,
+            'top_p': top_p,
+            'temperature': temperature,
+            'logits_processor': logits_processor,
+            **kwargs
+        }
+
+ history.append({'role': role, 'content': query})
+ history_str = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=False)
+ inputs = tokenizer(history_str, return_tensors='pt').to(self.device)
+ outputs = self.generate(**inputs, **gen_kwargs)
+ outputs = outputs.tolist()[0][len(inputs['input_ids'][0]):-1]
+ response = tokenizer.decode(outputs)
+        pattern = re.compile(r'.*?(?=<AI>|<用户>)', re.DOTALL)  # truncate the reply at the next turn marker
+ matches = pattern.findall(response)
+ if len(matches) > 0:
+ response = matches[0]
+ history.append({'role': 'assistant', 'content': response})
+ return response, history
+
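+# Usage sketch for `MiniCPMForCausalLM.chat` (checkpoint path and prompt are
+# placeholders; `trust_remote_code=True` makes transformers load this modeling file):
+#   from transformers import AutoModelForCausalLM, AutoTokenizer
+#   tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM4-0.5B', trust_remote_code=True)
+#   model = AutoModelForCausalLM.from_pretrained('openbmb/MiniCPM4-0.5B', trust_remote_code=True)
+#   response, history = model.chat(tokenizer, 'Hello!', history=[])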
+
+@add_start_docstrings(
+ """
+ The MiniCPM Model transformer with a sequence classification head on top (linear layer).
+
+ [`MiniCPMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ MINICPM_START_DOCSTRING,
+)
+class MiniCPMForSequenceClassification(MiniCPMPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = MiniCPMModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(MINICPM_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
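+ # the first pad position minus one gives the last non-padding index per row
+ # (argmax returns 0 when a row has no padding, so this falls back to -1, the last token)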
+ if input_ids is not None:
+ sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
+ logits.device
+ )
+ else:
+ sequence_lengths = -1
+
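+ # pool by gathering the logits at each sequence's last (non-padding) token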
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = 'regression'
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = 'single_label_classification'
+ else:
+ self.config.problem_type = 'multi_label_classification'
+
+ if self.config.problem_type == 'regression':
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == 'single_label_classification':
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == 'multi_label_classification':
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
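+
+# Usage sketch for MiniCPMForSequenceClassification (illustrative only; the checkpoint path,
+# num_labels and example inputs are assumptions):
+#
+#   from transformers import AutoTokenizer, AutoModelForSequenceClassification
+#   path = "path/to/minicpm-checkpoint"  # placeholder
+#   tok = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+#   clf = AutoModelForSequenceClassification.from_pretrained(path, num_labels=2,
+#                                                            trust_remote_code=True)
+#   batch = tok(["great model", "terrible model"], return_tensors="pt", padding=True)
+#   logits = clf(**batch).logits  # (batch_size, num_labels), pooled at the last real token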
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000..2fcea2d
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,33 @@
+{
+ "additional_special_tokens": [
+ "<|im_end|>",
+ "<|im_start|>",
+ "<|tool_call|>",
+ "<|execute_start|>",
+ "<|execute_end|>",
+ "<|fim_prefix|>",
+ "<|fim_middle|>",
+ "<|fim_suffix|>"
+ ],
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000..15a2f9b
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,490843 @@
+{
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73440,
+ "content": "<|im_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73441,
+ "content": "<|im_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73442,
+ "content": "<|tool_call|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73443,
+ "content": "<|execute_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73444,
+ "content": "<|execute_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73445,
+ "content": "<|fim_prefix|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73446,
+ "content": "<|fim_middle|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73447,
+ "content": "<|fim_suffix|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Prepend",
+ "prepend": "▁"
+ },
+ {
+ "type": "Replace",
+ "pattern": {
+ "String": " "
+ },
+ "content": "▁"
+ }
+ ]
+ },
+ "pre_tokenizer": null,
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 1
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "": {
+ "id": "",
+ "ids": [
+ 1
+ ],
+ "tokens": [
+ ""
+ ]
+ }
+ }
+ },
+ "decoder": {
+ "type": "Sequence",
+ "decoders": [
+ {
+ "type": "Replace",
+ "pattern": {
+ "String": "▁"
+ },
+ "content": " "
+ },
+ {
+ "type": "ByteFallback"
+ },
+ {
+ "type": "Fuse"
+ },
+ {
+ "type": "Strip",
+ "content": " ",
+ "start": 1,
+ "stop": 0
+ }
+ ]
+ },
+ "model": {
+ "type": "BPE",
+ "dropout": null,
+ "unk_token": "",
+ "continuing_subword_prefix": null,
+ "end_of_word_suffix": null,
+ "fuse_unk": true,
+ "byte_fallback": true,
+ "ignore_merges": false,
+ "vocab": {
+ "": 0,
+ "": 1,
+ "": 2,
+ "": 3,
+ "": 4,
+ "\n": 5,
+ "\t": 6,
+ "
": 7,
+ "
": 8,
+ "": 9,
+ "": 10,
+ "": 11,
+ "
": 12,
+ "": 13,
+ " | | ": 14,
+ "": 15,
+ "": 16,
+ "": 17,
+ "": 18,
+ "": 21,
+ "": 22,
+ "
": 23,
+ "": 24,
+ "": 25,
+ "": 26,
+ "": 27,
+ "": 28,
+ "": 29,
+ "": 30,
+ "": 31,
+ "": 32,
+ "
": 33,
+ "
": 34,
+ "
": 35,
+ "": 36,
+ "": 37,
+ "": 38,
+ "
": 39,
+ "": 40,
+ "": 41,
+ "
": 42,
+ "": 43,
+ "
": 44,
+ "
": 45,
+ "": 46,
+ "": 47,
+ "
": 48,
+ "": 49,
+ "": 50,
+ "": 51,
+ "0": 52,
+ "1": 53,
+ "2": 54,
+ "3": 55,
+ "4": 56,
+ "5": 57,
+ "6": 58,
+ "7": 59,
+ "8": 60,
+ "9": 61,
+ "+": 62,
+ "-": 63,
+ "=": 64,
+ ",": 65,
+ "。": 66,
+ "!": 67,
+ "?": 68,
+ "、": 69,
+ ":": 70,
+ "¥": 71,
+ ".": 72,
+ "!": 73,
+ "?": 74,
+ "...": 75,
+ "。。。": 76,
+ "。。。。。。": 77,
+ "《": 78,
+ "》": 79,
+ "【": 80,
+ "】": 81,
+ "『": 82,
+ "』": 83,
+ "```": 84,
+ "": 86,
+ "---": 87,
+ "": 88,
+ ";": 89,
+ ".": 90,
+ "=": 91,
+ "<": 92,
+ ">": 93,
+ "-": 94,
+ "+": 95,
+ "%": 96,
+ "‼": 97,
+ "㊣": 98,
+ "/": 99,
+ "|": 100,
+ "": 101,
+ "": 102,
+ "": 103,
+ "": 104,
+ "": 105,
+ "": 106,
+ "": 107,
+ "": 108,
+ "": 109,
+ "": 110,
+ "": 111,
+ "": 112,
+ "": 113,
+ "": 114,
+ "": 115,
+ "": 116,
+ "": 117,
+ "": 118,
+ "": 119,
+ "": 120,
+ "": 121,
+ "": 122,
+ "": 123,
+ "": 124,
+ "": 125,
+ "": 126,
+ "": 127,
+ "": 128,
+ "": 129,
+ "": 130,
+ "": 131,
+ "": 132,
+ "": 133,
+ "": 134,
+ "": 135,
+ "": 136,
+ "": 137,
+ "": 138,
+ "": 139,
+ "": 140,
+ "": 141,
+ "": 142,
+ "": 143,
+ "": 144,
+ "": 145,
+ "": 146,
+ "": 147,
+ "": 148,
+ "": 149,
+ "": 150,
+ "": 151,
+ "": 152,
+ "": 153,
+ "": 154,
+ "": 155,
+ "": 156,
+ "": 157,
+ "": 158,
+ "": 159,
+ "": 160,
+ "": 161,
+ "": 162,
+ "": 163,
+ "": 164,
+ "": 165,
+ "": 166,
+ "": 167,
+ "": 168,
+ "": 169,
+ "": 170,
+ "": 171,
+ "": 172,
+ "": 173,
+ "": 174,
+ "": 175,
+ "": 176,
+ "": 177,
+ "": 178,
+ "": 179,
+ "": 180,
+ "": 181,
+ "": 182,
+ "": 183,
+ "": 184,
+ "": 185,
+ "": 186,
+ "": 187,
+ "": 188,
+ "": 189,
+ "": 190,
+ "": 191,
+ "": 192,
+ "": 193,
+ "": 194,
+ "": 195,
+ "": 196,
+ "": 197,
+ "": 198,
+ "": 199,
+ "": 200,
+ "": 201,
+ "": 202,
+ "": 203,
+ "": 204,
+ "": 205,
+ "": 206,
+ "": 207,
+ "": 208,
+ "": 209,
+ "": 210,
+ "": 211,
+ "": 212,
+ "": 213,
+ "": 214,
+ "": 215,
+ "": 216,
+ "": 217,
+ "": 218,
+ "": 219,
+ "": 220,
+ "": 221,
+ "": 222,
+ "": 223,
+ "": 224,
+ "": 225,
+ "": 226,
+ "": 227,
+ "": 228,
+ "": 229,
+ "": 230,
+ "": 231,
+ "": 232,
+ "": 233,
+ "": 234,
+ "": 235,
+ "": 236,
+ "": 237,
+ "": 238,
+ "": 239,
+ "": 240,
+ "": 241,
+ "": 242,
+ "": 243,
+ "": 244,
+ "": 245,
+ "": 246,
+ "": 247,
+ "": 248,
+ "": 249,
+ "": 250,
+ "": 251,
+ "": 252,
+ "": 253,
+ "": 254,
+ "": 255,
+ "": 256,
+ "": 257,
+ "": 258,
+ "": 259,
+ "": 260,
+ "": 261,
+ "": 262,
+ "": 263,
+ "": 264,
+ "": 265,
+ "": 266,
+ "": 267,
+ "": 268,
+ "": 269,
+ "": 270,
+ "": 271,
+ "": 272,
+ "": 273,
+ "": 274,
+ "": 275,
+ "": 276,
+ "": 277,
+ "": 278,
+ "": 279,
+ "": 280,
+ "": 281,
+ "": 282,
+ "": 283,
+ "": 284,
+ "": 285,
+ "": 286,
+ "": 287,
+ "": 288,
+ "": 289,
+ "": 290,
+ "": 291,
+ "": 292,
+ "": 293,
+ "": 294,
+ "": 295,
+ "": 296,
+ "": 297,
+ "": 298,
+ "": 299,
+ "": 300,
+ "": 301,
+ "": 302,
+ "": 303,
+ "": 304,
+ "": 305,
+ "": 306,
+ "": 307,
+ "": 308,
+ "": 309,
+ "": 310,
+ "": 311,
+ "": 312,
+ "": 313,
+ "": 314,
+ "": 315,
+ "": 316,
+ "": 317,
+ "": 318,
+ "": 319,
+ "": 320,
+ "": 321,
+ "": 322,
+ "": 323,
+ "": 324,
+ "": 325,
+ "": 326,
+ "": 327,
+ "": 328,
+ "": 329,
+ "": 330,
+ "": 331,
+ "": 332,
+ "": 333,
+ "": 334,
+ "": 335,
+ "": 336,
+ "": 337,
+ "": 338,
+ "": 339,
+ "": 340,
+ "": 341,
+ "": 342,
+ "": 343,
+ "": 344,
+ "": 345,
+ "": 346,
+ "": 347,
+ "": 348,
+ "": 349,
+ "": 350,
+ "": 351,
+ "": 352,
+ "": 353,
+ "