from __future__ import annotations

import warnings

import torch
import torch.nn as nn
from transformers.pytorch_utils import Conv1D

from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer
from peft.utils import TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING

from .config import VBLoRAConfig
from .layer import Linear, VBLoRALayer


class VBLoRAModel(BaseTuner):
    """
    Creates VBLoRA model from a pretrained transformers model.

    The method is described in detail in https://huggingface.co/papers/2405.15179.

    Args:
        model ([`~transformers.PreTrainedModel`]): The model to be adapted.
        config ([`VBLoRAConfig`]): The configuration of the VBLoRA model.
        adapter_name (`str`): The name of the adapter, defaults to `"default"`.
        low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
            Create empty adapter weights on meta device. Useful to speed up the loading process.

    Returns:
        `torch.nn.Module`: The VBLoRA model.

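    VB-LoRA composes every low-rank update from a single bank of `num_vectors` vectors of length
    `vector_length` that is shared across all adapted layers; each sub-vector of a rank
    decomposition is selected as a differentiable top-k mixture over that bank (see the paper
    linked above), which keeps the per-layer storage down to mixture logits instead of full
    low-rank matrices.
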
    Example:

        ```py
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import VBLoRAConfig, get_peft_model

        >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        >>> config = VBLoRAConfig(
        ...     task_type="SEQ_CLS",
        ...     r=4,
        ...     target_modules=["fc1", "fc2", "k_proj", "out_proj", "q_proj", "v_proj"],
        ...     num_vectors=60,
        ...     vector_length=256,
        ...     save_only_topk_weights=True,
        ... )
        >>> model = get_peft_model(base_model, config)
        ```
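
    After wrapping the model, the storage footprint of the adapter can be inspected with the
    helpers defined on this class (illustrative calls; the reported counts depend on the base
    model and the config above):

        ```py
        >>> model.print_savable_parameters()
        >>> vblora_params, other_params = model.get_nb_savable_parameters()
        ```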

    **Attributes**:
        - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
        - **peft_config** ([`VBLoRAConfig`]) -- The configuration of the VBLoRA model.
    """
vblora_strprefixc                    [         R                  " UR                  UR                  5      n[         R                  R
                  R                  X1R                  * UR                  5        X0R                  U'   g N)	torchzerosnum_vectorsvector_lengthnninituniform_init_vector_bank_boundvblora_vector_bank)selfconfigadapter_namer   s       R/home/james-whalen/.local/lib/python3.13/site-packages/peft/tuners/vblora/model.py_init_vblora_vector_bank$VBLoRAModel._init_vblora_vector_bankH   sV    "[[););V=Q=QR14Q4Q3QSYSpSpq0B-    c                :    [         R                  " 0 5      U l        g r   )r   ParameterDictr   )r   modelr   r   s       r    _pre_injection_hookVBLoRAModel._pre_injection_hookM   s    "$"2"22"6r#   c                ,   Uc  [        S5      e[        US5      =(       a    UR                  S LnUR                  US.nU R	                  X5        [        U[        5      (       a]  UR                  UU R                  UR                  UR                  UR                  UR                  UR                  UR                  S9  g U R                  " SUU R                  UUS.UD6n	X R                   ;  a  U	R#                  S5        U R%                  XTX5        g )NzCurrent Key shouldn't be `None`bias)fan_in_fan_outr*   )r   r   rtopkr   r   vblora_dropoutinit_logits_std)vblora_configr   r   targetF )
    def _create_and_replace(
        self,
        vblora_config,
        adapter_name,
        target,
        target_name,
        parent,
        current_key,
    ):
        if current_key is None:
            raise ValueError("Current Key shouldn't be `None`")
        bias = hasattr(target, "bias") and target.bias is not None
        kwargs = {
            "fan_in_fan_out": vblora_config.fan_in_fan_out,
            "bias": bias,
        }
        self._init_vblora_vector_bank(vblora_config, adapter_name)
        if isinstance(target, Linear):
            # The target is already a VB-LoRA layer: register the new adapter on it.
            target.update_layer(
                adapter_name=adapter_name,
                vblora_vector_bank=self.vblora_vector_bank,
                r=vblora_config.r,
                topk=vblora_config.topk,
                num_vectors=vblora_config.num_vectors,
                vector_length=vblora_config.vector_length,
                vblora_dropout=vblora_config.vblora_dropout,
                init_logits_std=vblora_config.init_logits_std,
            )
        else:
            new_module = self._create_new_module(
                vblora_config=vblora_config,
                vblora_vector_bank=self.vblora_vector_bank,
                adapter_name=adapter_name,
                target=target,
                **kwargs,
            )
            if adapter_name not in self.active_adapter:
                # Adding an additional adapter: it is not automatically trainable.
                new_module.requires_grad_(False)
            self._replace_module(parent, target_name, new_module, target)
                  5      (       a-  US   (       a"  [        R                  " S5        S=US'   U l        OV[        U[        5      (       a2  SUS'   US   (       d"  [        R                  " S5        S=US'   U l        O[        SU S35      e[        S
UUUU R                  U R                  U R                  U R                  U R                  U R                   S	.	UD6nU$ )Nr+   zjfan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.FTis_target_conv_1d_layerzafan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True.zTarget module z is not supported. Currently, only the following modules are supported: `torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`.)	
base_layerr   r   r,   r   r   r-   r.   r/   r2   )r5   r   get_base_layerr   r   r
   warningswarnr+   r   r3   r,   r   r   r-   r.   r/   )r0   r   r   r1   r>   target_base_layerr?   s          r    r7   VBLoRAModel._create_new_module|   s5   fn-- & 5 5 7 &'99&'7 KPO'(=+G)62204F,-*+w KON'(=+G  )J J   
1%oo%11'55##(77)99
 

 r#   c                   SnSnSnU R                  5        H^  u  pVSU;   a  X&R                  5       -  nM  SU;   a  X6R                  5       -  nM9  UR                  (       d  ML  XFR                  5       -  nM`     U R                  U   R                  (       a  U R                  U   R
                  nSnUS:  a  SnOUS:  a  SnOUS	:  a  SnOS
nX R                  U   R
                  -  U R                  U   R                  S-
  -  n	X R                  U   R
                  -  U R                  U   R                  -  U-  n
[        X9-   U
-   5      nX4$ X2-   nX4$ )zP
        Returns the number of savable VB-LoRA parameters and other savable parameters.
        """
        logits_params = 0
        vector_bank_params = 0
        other_params = 0
        for name, param in self.named_parameters():
            if "vblora_logits" in name:
                logits_params += param.numel()
            elif "vblora_vector_bank" in name:
                vector_bank_params += param.numel()
            elif param.requires_grad:
                other_params += param.numel()
        if self.peft_config[adapter].save_only_topk_weights:
            num_vectors = self.peft_config[adapter].num_vectors
            factor = 1
            # When only the top-k weights are saved, the indices into the vector bank are stored
            # in the smallest integer dtype that can hold `num_vectors`; the factor expresses that
            # storage cost in float32-equivalents (uint8 -> 0.25, int16 -> 0.5, int32 -> 1, int64 -> 2).
            if num_vectors < 2**8:
                factor = 0.25
            elif num_vectors < 2**15:
                factor = 0.5
            elif num_vectors < 2**31:
                factor = 1
            else:
                factor = 2
            # logits_params / num_vectors is the total number of sub-vectors; per sub-vector,
            # topk - 1 weights (they sum to one) and topk indices are saved.
            topk_weight_params = (
                logits_params / self.peft_config[adapter].num_vectors * (self.peft_config[adapter].topk - 1)
            )
            topk_indices_params = (
                logits_params / self.peft_config[adapter].num_vectors * self.peft_config[adapter].topk * factor
            )
            vblora_params = int(vector_bank_params + topk_weight_params + topk_indices_params)
        else:
            vblora_params = vector_bank_params + logits_params
        return vblora_params, other_params

    def print_savable_parameters(self) -> None:
        r"""
        Prints the number of savable VB-LoRA parameters and total savable parameters.
        """
        vblora_params, other_params = self.get_nb_savable_parameters()
        print(
            f"VB-LoRA params to-be-saved (float32-equivalent): {vblora_params:,d} "
            f"|| total params to-be-saved: {vblora_params + other_params:,d}"
        )