"""VideoLLaMA3 vision encoder model configuration."""

from transformers import PretrainedConfig


class Videollama3VisionEncoderConfig(PretrainedConfig):
    """Configuration class for the VideoLLaMA3 vision encoder."""

    model_type = "videollama3_vision_encoder"

    def __init__(
        self,
        hidden_size=768,            # transformer embedding dimension
        intermediate_size=3072,     # MLP inner dimension (4 * hidden_size)
        num_hidden_layers=12,       # number of transformer blocks
        num_attention_heads=12,     # attention heads per block
        num_channels=3,             # input image channels (RGB)
        patch_size=16,              # side length of each square image patch
        hidden_act="gelu_pytorch_tanh",  # tanh-approximated GELU activation
        layer_norm_eps=1e-6,        # epsilon for LayerNorm stability
        attention_dropout=0.0,      # dropout on attention probabilities
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
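

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). It relies only
# on the standard `transformers` PretrainedConfig API (`save_pretrained` /
# `from_pretrained`); the save directory below is a hypothetical example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Build a config with non-default sizes, persist it, and reload it.
    config = Videollama3VisionEncoderConfig(hidden_size=1024, num_hidden_layers=24)
    config.save_pretrained("./videollama3_vision_encoder")  # hypothetical path
    reloaded = Videollama3VisionEncoderConfig.from_pretrained(
        "./videollama3_vision_encoder"
    )
    assert reloaded.hidden_size == 1024 and reloaded.num_hidden_layers == 24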