from ...configuration_utils import PretrainedConfig


class MinistralConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`MinistralModel`]. It is used to instantiate a
Ministral model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Ministral-8B-Instruct-2410.

[mistralai/Ministral-8B-Instruct-2410](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410)

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    vocab_size (`int`, *optional*, defaults to 32000):
        Vocabulary size of the Ministral model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`MinistralModel`].
    hidden_size (`int`, *optional*, defaults to 4096):
        Dimension of the hidden representations.
    intermediate_size (`int`, *optional*, defaults to 14336):
        Dimension of the MLP representations.
    num_hidden_layers (`int`, *optional*, defaults to 32):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 32):
        Number of attention heads for each attention layer in the Transformer encoder.
    num_key_value_heads (`int`, *optional*, defaults to 8):
        This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
        `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
        `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by mean-pooling all the original heads within that group (see the sketch after this argument list). For more
        details, check out [this paper](https://huggingface.co/papers/2305.13245). If not specified, it will default
        to `8`.
    head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
        The attention head dimension.
    hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
        The non-linear activation function (function or string) in the decoder.
    max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
        The maximum sequence length that this model might ever be used with. Ministral's sliding window attention
        allows sequences of up to 4096*32 tokens.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    rms_norm_eps (`float`, *optional*, defaults to 1e-06):
        The epsilon used by the rms normalization layers.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models). Only
        relevant if `config.is_decoder=True`.
    pad_token_id (`int`, *optional*):
        The id of the padding token.
    bos_token_id (`int`, *optional*, defaults to 1):
        The id of the "beginning-of-sequence" token.
    eos_token_id (`int`, *optional*, defaults to 2):
        The id of the "end-of-sequence" token.
    tie_word_embeddings (`bool`, *optional*, defaults to `False`):
        Whether the model's input and output word embeddings should be tied.
    rope_theta (`float`, *optional*, defaults to 10000.0):
        The base period of the RoPE embeddings.
    sliding_window (`int`, *optional*, defaults to 4096):
        Sliding window attention window size. If not specified, will default to `4096`.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    layer_types (`list`, *optional*):
        Attention pattern for each layer.
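
When converting a multi-head checkpoint to GQA (see `num_key_value_heads` above), each group of query heads
shares a single key/value head obtained by mean-pooling the original heads in that group. The sketch below
illustrates this on one projection weight; it assumes PyTorch, a weight of shape
`(num_attention_heads * head_dim, hidden_size)`, and a hypothetical helper name `meanpool_kv_heads` that is
not part of this library:

```python
import torch

def meanpool_kv_heads(weight, num_attention_heads, num_key_value_heads, head_dim):
    # Split the rows into (kv groups, original heads per group, head_dim, hidden),
    # then average each group of original heads into a single key/value head.
    group_size = num_attention_heads // num_key_value_heads
    grouped = weight.reshape(num_key_value_heads, group_size, head_dim, weight.shape[-1])
    return grouped.mean(dim=1).reshape(num_key_value_heads * head_dim, weight.shape[-1])

# e.g. the key projection of a 32-head MHA checkpoint with head_dim 128
k_proj = torch.randn(32 * 128, 4096)
k_proj_gqa = meanpool_kv_heads(k_proj, num_attention_heads=32, num_key_value_heads=8, head_dim=128)
```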

```python
>>> from transformers import MinistralModel, MinistralConfig

>>> # Initializing a Ministral 8B style configuration
>>> configuration = MinistralConfig()

>>> # Initializing a model from the Ministral 8B style configuration
>>> model = MinistralModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
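
>>> # A GQA-style variant (hypothetical values, for illustration): 32 query heads
>>> # sharing 8 key/value heads, alternating sliding-window and full attention
>>> custom_configuration = MinistralConfig(
...     num_attention_heads=32,
...     num_key_value_heads=8,
...     layer_types=["sliding_attention", "full_attention"] * 16,
... )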
```"""

    model_type = "ministral"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        head_dim=None,
        hidden_act="silu",
        max_position_embeddings=4096 * 32,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        sliding_window=4096,
        attention_dropout=0.0,
        layer_types=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        self.head_dim = head_dim or hidden_size // num_attention_heads
        # Backward compatibility: fall back to multi-head attention when no
        # key/value head count is given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.layer_types = layer_types
        # Default attention pattern: sliding-window attention in every layer when
        # a window is configured, full attention otherwise.
        if self.layer_types is None:
            self.layer_types = [
                "sliding_attention" if self.sliding_window is not None else "full_attention"
            ] * num_hidden_layers


__all__ = ["MinistralConfig"]