
    cCiW                     `    S r SSKJr  SSKJr  \R
                  " \5      r " S S\5      rS/r	g)zQDQBERT model configuration   )PretrainedConfig)loggingc                   T   ^  \ rS rSrSrSr                SU 4S jjrSrU =r$ )QDQBertConfig   a  
This is the configuration class to store the configuration of a [`QDQBertModel`]. It is used to instantiate a
QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the BERT
[google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    vocab_size (`int`, *optional*, defaults to 30522):
        Vocabulary size of the QDQBERT model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`QDQBertModel`].
    hidden_size (`int`, *optional*, defaults to 768):
        Dimension of the encoder layers and the pooler layer.
    num_hidden_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    intermediate_size (`int`, *optional*, defaults to 3072):
        Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
    hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"selu"` and `"gelu_new"` are supported.
    hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    max_position_embeddings (`int`, *optional*, defaults to 512):
        The maximum sequence length that this model might ever be used with. Typically set this to something large
        just in case (e.g., 512 or 1024 or 2048).
    type_vocab_size (`int`, *optional*, defaults to 2):
        The vocabulary size of the `token_type_ids` passed when calling [`QDQBertModel`].
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    layer_norm_eps (`float`, *optional*, defaults to 1e-12):
        The epsilon used by the layer normalization layers.
    is_decoder (`bool`, *optional*, defaults to `False`):
        Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/value states (not used by all models). Only
        relevant if `config.is_decoder=True`.

Examples:

```python
>>> from transformers import QDQBertModel, QDQBertConfig

>>> # Initializing a QDQBERT google-bert/bert-base-uncased style configuration
>>> configuration = QDQBertConfig()

>>> # Initializing a model from the google-bert/bert-base-uncased style configuration
>>> model = QDQBertModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
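
>>> # For illustration only: any of the documented arguments above can be overridden at
>>> # construction time (the sizes below are arbitrary and do not correspond to a released checkpoint)
>>> custom_configuration = QDQBertConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)
>>> custom_model = QDQBertModel(custom_configuration)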
```"""

    model_type = "qdqbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache


__all__ = ["QDQBertConfig"]