from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import check_model_inputs
from .configuration_cohere import CohereConfig


class CohereLayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)


class CohereRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # annotation for the non-persistent buffer registered below

    def __init__(self, config: CohereConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Cohere interleaves the frequencies instead of concatenating two halves as Llama does
            emb = torch.repeat_interleave(freqs, 2, dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class CohereMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
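

# Illustrative check (ours, not part of the upstream file): repeat_kv is an
# expand+reshape equivalent of torch.repeat_interleave along the head dimension.
# With 2 KV heads repeated 4x, (1, 2, 5, 8) -> (1, 8, 5, 8):
#
#     x = torch.randn(1, 2, 5, 8)
#     assert repeat_kv(x, 4).shape == (1, 8, 5, 8)
#     assert torch.equal(repeat_kv(x, 4), torch.repeat_interleave(x, repeats=4, dim=1))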


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    # Split and rotate adjacent (even, odd) pairs; differs from the Llama half-split version
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)
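

# Minimal usage sketch (ours; shapes are illustrative): cos/sin come from
# CohereRotaryEmbedding as (batch, seq_len, head_dim) tables, and unsqueeze_dim=1
# broadcasts them over the head dimension of (batch, heads, seq_len, head_dim) q/k:
#
#     rope = CohereRotaryEmbedding(config)
#     cos, sin = rope(hidden_states, position_ids)
#     q, k = apply_rotary_pos_emb(q, k, cos, sin)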


class CohereAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            # QKNorm normalizes over (num_heads, head_dim), hence the tuple hidden_size
            self.q_norm = CohereLayerNorm(
                hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps
            )
            self.k_norm = CohereLayerNorm(
                hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
            )

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        if self.use_qk_norm:  # QKNorm is applied before the head dimension is transposed
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class CohereDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: CohereConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
        self.mlp = CohereMLP(config)
        self.input_layernorm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_values (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        # Fully Connected (parallel to attention: both branches read the same normed input)
        hidden_states_mlp = self.mlp(hidden_states)

        # Add everything together with a single residual connection
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        return hidden_states


@auto_docstring
class CoherePreTrainedModel(PreTrainedModel):
    config: CohereConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CohereDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": CohereDecoderLayer,
        "attentions": CohereAttention,
    }


@auto_docstring
class CohereModel(CoherePreTrainedModel):
    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = CohereRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class CohereForCausalLM(CoherePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = CohereModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]
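

# Note (ours): the two Cohere-specific deviations from the Llama reference
# implementation are the parallel residual block in CohereDecoderLayer (attention
# and MLP share one LayerNorm and one residual add) and the multiplication of the
# LM logits by config.logit_scale in CohereForCausalLM.forward.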