
    cCi                        S r SSKJrJrJr  SSKrSSKJr  SSKJr  SSK	J
r
Jr  SSKJr  SS	KJr  SS
KJr  SSKJrJrJr  SSKJrJr  SSKJrJr  SSKJrJr  SSKJ r   SSK!J"r"J#r#J$r$J%r%  SSK&J'r'  SSK(J)r)  \$" 5       (       a  SSK*J+r+  SSK,J-r-  \%R\                  " \/5      r0 " S S\Rb                  5      r2S r3S3S jr4 " S S\Rb                  5      r5 S4S\Rb                  S\Rl                  S\Rl                  S\Rl                  S \\Rl                     S!\7S"\74S# jjr8 " S$ S%\Rb                  5      r9 " S& S'\5      r:\" " S( S)\5      5       r;\" " S* S+\;5      5       r< " S, S-\;\5      r= " S. S/\\;5      r> " S0 S1\\;5      r?/ S2Qr@g)5zPyTorch Persimmon model.    )CallableOptionalUnionN)nn   )ACT2FN)CacheDynamicCache)GenerationMixin)AttentionMaskConverter)FlashAttentionKwargs) GenericForSequenceClassificationGenericForTokenClassificationGradientCheckpointingLayer)BaseModelOutputWithPastCausalLMOutputWithPast)ROPE_INIT_FUNCTIONSdynamic_rope_update)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)auto_docstringcan_return_tupleis_torch_flex_attn_availablelogging)deprecate_kwarg   )PersimmonConfig)	BlockMask)make_flex_block_causal_maskc                      ^  \ rS rSr% \R
                  \S'   SS\4U 4S jjjr\R                  " 5       \
S 5       5       rSrU =r$ )PersimmonRotaryEmbedding;   inv_freqconfigc                   > [         TU ]  5         [        US5      (       aZ  [        UR                  [
        5      (       a;  UR                  R                  SUR                  R                  S5      5      U l        OSU l        UR                  U l	        UR                  U l
        Xl        [        U R                     U l        U R                  U R                  U5      u  o0l        U R                  SUSS9  U R                   U l        g )Nrope_scaling	rope_typetypedefaultr$   F)
persistent)super__init__hasattr
isinstancer'   dictgetr(   max_position_embeddingsmax_seq_len_cachedoriginal_max_seq_lenr%   r   rope_init_fnattention_scalingregister_bufferr$   original_inv_freq)selfr%   devicer$   	__class__s       j/home/james-whalen/.local/lib/python3.13/site-packages/transformers/models/persimmon/modeling_persimmon.pyr-   !PersimmonRotaryEmbedding.__init__>   s    6>**z&:M:Mt/T/T#0044[&BUBUBYBYZ`BabDN&DN"("@"@$*$B$B!/?+/+<+<T[[&+Q((ZeD!%    c                 b   U R                   S S S 2S 4   R                  5       R                  UR                  S   SS5      R	                  UR
                  5      nUS S 2S S S 24   R                  5       n[        UR
                  R                  [        5      (       a0  UR
                  R                  S:w  a  UR
                  R                  OSn[        R                  " USS9   UR                  5       UR                  5       -  R                  SS5      n[        R                  " Xf4SS	9nUR                  5       U R                  -  nUR                  5       U R                  -  n	S S S 5        WR	                  UR                   S
9W	R	                  UR                   S
94$ ! , (       d  f       N@= f)Nr   r   mpscpuF)device_typeenabled   dim)dtype)r$   floatexpandshapetor:   r/   r)   strtorchautocast	transposecatcosr6   sinrH   )
r9   xposition_idsinv_freq_expandedposition_ids_expandedrC   freqsembrR   rS   s
             r<   forward PersimmonRotaryEmbedding.forwardO   sR    !MM$4-8>>@GGHZHZ[\H]_acdehhijiqiqr ,QaZ 8 > > @'1!((--'E'E!((--[`J`ahhmmfk^^UC&,,.1F1L1L1NNYYZ[]^_E))UN3C'')d444C'')d444C	 D vvAGGv$cff177f&;;; DCs   $BF  
F.)r6   r%   r3   r8   r4   r5   r(   N)__name__
__module____qualname____firstlineno__rN   Tensor__annotations__r   r-   no_gradr   rZ   __static_attributes____classcell__r;   s   @r<   r"   r"   ;   s@    ll/ / /" ]]_<  <r>   r"   c                     U SSU R                   S   S-  24   nU SU R                   S   S-  S24   n[        R                  " U* U4SS9$ )z*Rotates half the hidden dims of the input..Nr@   rE   rF   )rK   rN   rQ   )rT   x1x2s      r<   rotate_halfrj   `   sZ    	
3"!''"+"""	#B	
3q ""	#B99rc2YB''r>   c                     UR                  U5      nUR                  U5      nX-  [        U 5      U-  -   nX-  [        U5      U-  -   nXg4$ )a  Applies Rotary Position Embedding to the query and key tensors.

Args:
    q (`torch.Tensor`): The query tensor.
    k (`torch.Tensor`): The key tensor.
    cos (`torch.Tensor`): The cosine part of the rotary embedding.
    sin (`torch.Tensor`): The sine part of the rotary embedding.
    position_ids (`torch.Tensor`, *optional*):
        Deprecated and unused.
    unsqueeze_dim (`int`, *optional*, defaults to 1):
        The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
        sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
        that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
        k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
        cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
        the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
    `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
)	unsqueezerj   )qkrR   rS   rU   unsqueeze_dimq_embedk_embeds           r<   apply_rotary_pos_embrr   h   sS    ( --
&C
--
&Cw;q>C/0Gw;q>C/0Gr>   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ )PersimmonMLP   c                   > [         TU ]  5         [        R                  " UR                  UR
                  5      U l        [        R                  " UR
                  UR                  5      U l        [        UR                     U l
        g r\   )r,   r-   r   Linearhidden_sizeintermediate_sizedense_h_to_4hdense_4h_to_hr   
hidden_actactr9   r%   r;   s     r<   r-   PersimmonMLP.__init__   s^    YYv'9'96;S;STYYv'?'?ASAST&++,r>   c                 l    U R                  U5      nU R                  U5      nU R                  U5      nU$ r\   )rz   r}   r{   )r9   hidden_statess     r<   rZ   PersimmonMLP.forward   s6    **=9/**=9r>   )r}   r{   rz   )r]   r^   r_   r`   r-   rZ   rd   re   rf   s   @r<   rt   rt      s    - r>   rt   modulequerykeyvalueattention_maskscalingdropoutc                    [         R                  " XR                  SS5      5      U-  nUb"  US S 2S S 2S S 2S UR                  S   24   n	X-   n[        R
                  R                  US[         R                  S9R                  UR                  5      n[        R
                  R                  XU R                  S9n[         R                  " X5      n
U
R                  SS5      R                  5       n
X4$ )NrE   r   r@   )rG   rH   )ptrainingr   )rN   matmulrP   rK   r   
functionalsoftmaxfloat32rL   rH   r   r   
contiguous)r   r   r   r   r   r   r   kwargsattn_weightscausal_maskattn_outputs              r<   eager_attention_forwardr      s     <<}}Q':;gEL!$Q1o		"o%=>#1==((2U]](SVVW\WbWbcL==((6??([L,,|3K''1-88:K$$r>   c                   
  ^  \ rS rSrSrSS\S\\   4U 4S jjjrS\	R                  S\\	R                  \	R                  \	R                  4   4S jr\" S	S
SS9       SS\	R                  S\\	R                     S\\	R                     S
\\   S\S\S\\	R                     S\\\	R                  \	R                  4      S\\   S\\	R                  \\	R                     \\\	R                        4   4S jj5       rSrU =r$ )PersimmonAttention   z=Multi-headed attention from 'Attention Is All You Need' paperr%   	layer_idxc                   > [         TU ]  5         Xl        X l        Uc-  [        R                  SU R                  R                   S35        UR                  U l        UR                  U l
        U R                  U R                  -  U l        UR                  U l        [        U R                  UR                  -  5      U l        SU l        U R                  U R                  -  U R                  :w  a&  [#        SU R                   SU R                   S35      e[$        R&                  " U R                  SU R                  -  SS9U l        [$        R&                  " U R                  U R                  -  U R                  SS9U l        UR,                  U l        U R                  S	-  U l        U R,                  (       ax  [$        R0                  " UR                  U R                  -  UR2                  SS
9U l        [$        R0                  " UR                  U R                  -  UR2                  SS
9U l        [$        R8                  " UR:                  5      U l        [=        U R                  S9U l        g )NzInstantiating z without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.Tz?hidden_size must be divisible by num_heads (got `hidden_size`: z and `num_heads`: z).r   biasg      )epselementwise_affiner%   ) r,   r-   r%   r   loggerwarning_oncer;   r]   rx   num_attention_heads	num_headshead_dim
rope_thetaintpartial_rotary_factorrotary_ndims	is_causal
ValueErrorr   rw   query_key_valuedenseqk_layernormr   	LayerNormlayer_norm_epsq_layernormk_layernormDropoutattention_dropoutr"   
rotary_embr9   r%   r   r;   s      r<   r-   PersimmonAttention.__init__   s   " !8!8 9 :, , "--33((DNN: ++0L0L LMMMDNN*t/?/??QRVRbRbQc$T^^$4B8   "yy)9)91t?O?O;OVZ[YYt~~=t?O?OVZ[
"//}}d*!||""dnn4&:O:Odh D  "||""dnn4&:O:Odh D "$F,D,D!E2$++Fr>   	fused_qkvreturnc                     UR                   u  p#nUR                  X#U R                  SU R                  5      nUSSSS24   USSSS24   USSSS24   4$ )a  
Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
storage as `fused_qkv`

Args:
    fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]

Returns:
    query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
    value: [batch_size, seq_length, num_heads, head_dim]
r   .r   Nr   rE   )rK   viewr   r   )r9   r   
batch_size
seq_lengththree_times_hidden_sizes        r<   _split_headsPersimmonAttention._split_heads   s^     ;D//7
 7NN:4>>1dmm\	a#YsAqy%99S!QY;OOOr>   past_key_valuepast_key_values4.58new_nameversionr   r   rU   output_attentions	use_cachecache_positionposition_embeddingsr   c	                 0   UR                  5       u  pnU R                  U5      nU R                  U5      u  pnU R                  (       a"  U R	                  U5      nU R                  U5      nUR                  SS5      nUR                  SS5      nUR                  SS5      nUu  nnUSS U R                  24   USU R                  S 24   nnUSS U R                  24   USU R                  S 24   nn[        UUUU5      u  nn[        R                  " UU4SS9n[        R                  " UU4SS9nUb2  UUU R                  US.nUR                  UUU R                  U5      u  nn[        nU R                  R                  S:w  a  [         U R                  R                     nU" U UUUU4U R"                  (       d  SOU R                  R$                  U R&                  S	.U	D6u  nnUR)                  XS5      nU R+                  U5      nU(       d  S nUU4$ )
Nr   rE   .r@   rF   )rS   rR   partial_rotation_sizer   eager        )r   r   )sizer   r   r   r   r   rP   r   rr   rN   rQ   updater   r   r%   _attn_implementationr   r   r   r   reshaper   )r9   r   r   rU   r   r   r   r   r   r   bszq_len_r   query_states
key_statesvalue_statesrR   rS   	query_rot
query_passkey_rotkey_passcache_kwargsattention_interfacer   r   s                              r<   rZ   PersimmonAttention.forward   sO    &**,A ((7	 483D3DY3O0<++L9L))*5J $--a3#--a3))!Q/
&S 1 1 1112d//112 	
 s/d////0sD--//0 
 2)Wc3O	7 yy)Z!8bAYY2;
& )-):):"0	L (7'='=j,X\XfXfht'u$J(?;;++w6"9$++:Z:Z"[$7	%
  $}}C$++2O2OLL	%
 	%
!\ "))#b9jj- LL((r>   )r   r%   r   r   rx   r   r   r   r   r   r   r   r   r   r   r   r\   NNNFFNN)r]   r^   r_   r`   __doc__r   r   r   r-   rN   ra   tupler   r   
LongTensorr	   boolr   r   rZ   rd   re   rf   s   @r<   r   r      sl   G$G $G8C= $G $GLPell PuU\\5<<Y^YeYe=e7f P  %0A6R 2637+/"'59KON)||N) !.N) u//0	N)
 "%N)  N) N) !!1!12N) &eELL%,,,F&GHN) -.N) 
u||Xell3XeELL>Q5RR	SN) SN)r>   r   c                     ^  \ rS rSrS\S\4U 4S jjr\" SSSS9       SS	\R                  S
\
\R                     S\
\R                     S\
\   S\
\   S\
\   S\
\R                     S\
\\R                  \R                  4      S\\   S\\R"                  \
\\R"                  \R"                  4      4   4S jj5       rSrU =r$ )PersimmonDecoderLayeri4  r%   r   c                   > [         TU ]  5         UR                  U l        [        XS9U l        [        U5      U l        [        R                  " UR                  UR                  S9U l
        [        R                  " UR                  UR                  S9U l        [        R                  " UR                  5      U l        g )N)r%   r   r   )r,   r-   rx   r   	self_attnrt   mlpr   r   r   input_layernormpost_attention_layernormr   hidden_dropoutr   r   s      r<   r-   PersimmonDecoderLayer.__init__5  s    !--+6O'!||F,>,>FDYDYZ(*V5G5GVMbMb(c%zz&"7"78r>   r   r   r   r   r   r   rU   r   r   r   r   r   r   c	                     Un
U R                  U5      nU R                  " SUUUUUUUUS.U	D6u  pX-   nUn
U R                  U5      nU R                  U5      nU R	                  U5      nX-   nU4nU(       a  X4-  nU$ )a  
Args:
    hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
    attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
        `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
    position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
        Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
        `[0, config.n_positions - 1]`.
        [What are position IDs?](../glossary#position-ids)
    past_key_values (`Cache`, *optional*):
        cached past key and value projection states
    output_attentions (`bool`, *optional*):
        Whether or not to return the attentions tensors of all attention layers. See `attentions` under
        returned tensors for more detail.
    use_cache (`bool`, *optional*):
        If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
        (see `past_key_values`).
    cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
        Indices depicting the position of the input sequence tokens in the sequence
    position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
        Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
        with `head_dim` being the embedding dimension of each attention head.
)r   r   rU   r   r   r   r   r    )r   r   r   r   r   )r9   r   r   rU   r   r   r   r   r   r   residualself_attn_weightsoutputss                r<   rZ   PersimmonDecoderLayer.forward>  s    J !,,]; ,0>> 
,
')%+/) 3
,
 
,
( !0 !55mD/]3%0 "++Gr>   )r   rx   r   r   r   r   r   )r]   r^   r_   r`   r   r   r-   r   rN   ra   r   r   r	   r   r   r   r   FloatTensorrZ   rd   re   rf   s   @r<   r   r   4  s2   9 93 9 %0A6R 2637+/,1$)59KOC||C !.C u//0	C
 "%C $D>C D>C !!1!12C &eELL%,,,F&GHC -.C 
u  (51B1BEDUDU1U+V"WW	XC SCr>   r   c                   H    \ rS rSr% \\S'   SrSrS/rSr	Sr
SrSrSrS rSrg	)
PersimmonPreTrainedModeli  r%   modelTr   r   c                    U R                   R                  n[        U[        R                  5      (       aW  UR
                  R                  R                  SUS9  UR                  b%  UR                  R                  R                  5         g g [        U[        R                  5      (       ad  UR
                  R                  R                  SUS9  UR                  b2  UR
                  R                  UR                     R                  5         g g [        U[        R                  5      (       aJ  UR
                  R                  R                  S5        UR                  R                  R                  5         g g )Nr   )meanstdg      ?)r%   initializer_ranger/   r   rw   weightdatanormal_r   zero_	Embeddingpadding_idxr   fill_)r9   r   r   s      r<   _init_weights&PersimmonPreTrainedModel._init_weights  s   kk++fbii((MM&&CS&9{{&  &&( '--MM&&CS&9!!-""6#5#56<<> .--MM$$S)KK""$ .r>   r   N)r]   r^   r_   r`   r   rb   base_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_can_compile_fullgraph_supports_sdpa_supports_flash_attn_supports_attention_backendr  rd   r   r>   r<   r   r     s?    &*#01"3!N"&%r>   r   c                     ^  \ rS rSrSrS\4U 4S jjr\\         SS\	\
R                     S\	\
R                     S\	\
R                     S\	\   S	\	\
R                     S
\	\   S\	\   S\	\   S\	\
R                     S\\   S\4S jj5       5       r SS\\
R                  S4   S\
R                  S\
R                  S\S\4
S jjr\S\
R                  S\S\S\
R0                  S\
R                  S\4S j5       rSrU =r$ )PersimmonModeli  z
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]

Args:
    config: PersimmonConfig
r%   c           	        > [         TU ]  U5        UR                  U l        UR                  U l        [
        R                  " UR                  UR                  U R                  5      U l        [
        R                  " [        UR                  5       Vs/ s H  n[        X5      PM     sn5      U l        [
        R                  " UR                  UR                  S9U l        [#        US9U l        SU l        U R)                  5         g s  snf )Nr   r   F)r,   r-   pad_token_idr  
vocab_sizer   r  rx   embed_tokens
ModuleListrangenum_hidden_layersr   layersr   r   final_layernormr"   r   gradient_checkpointing	post_initr   s      r<   r-   PersimmonModel.__init__  s     !.. ++LL):):F<N<NPTP`P`ammGLVMeMeGfgGf)"65Gfg
  "||F,>,>FDYDYZ2&A&+# hs   D
	input_idsr   rU   r   inputs_embedsr   r   output_hidden_statesr   r   r   c
                    Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nUS L US L-  (       a  [	        S5      eU R
                  (       a/  U R                  (       a  U(       a  [        R                  S5        SnU(       a  Uc  [        U R                   S9nUc  U R                  U5      nU	cD  Ub  UR                  5       OSn[        R                  " XUR                  S   -   UR                  S9n	Uc  U	R!                  S5      nU R#                  X%XU5      nUnU R%                  X5      nU(       a  SOS nU(       a  SOS nU R&                   H7  nU(       a  X4-  nU" U4UUUUUU	US	.U
D6nUS   nU(       d  M.  UUS   4-  nM9     U R)                  U5      nU(       a  X4-  n[+        UUUUS
9$ )Nz:You must specify exactly one of input_ids or inputs_embedszZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fr   r   r   r:   r   )r   rU   r   r   r   r   r   )last_hidden_stater   r   
attentions)r%   r   r  r   r   r  r   r   r   r
   r  get_seq_lengthrN   arangerK   r:   rl   _update_causal_maskr   r  r  r   )r9   r  r   rU   r   r  r   r   r  r   r   past_seen_tokensr   r   r   all_hidden_statesall_self_attnsdecoder_layerlayer_outputss                      r<   rZ   PersimmonModel.forward  s    2C1N-TXT_T_TqTq$8$D $++JjJj 	 "+!6IDKK<Q<Q	-t";<YZZ&&4==##p "	0*$++>O  --i8M!CRC^==?de"\\ ]5H5H5K"KTaThThN )33A6L..>L]
 & #oomJ #7BD0d![[M#!%55!)
*) /"3#-$7
 
M *!,M  =#3"55' )* ,,];  !11&+++%	
 	
r>   r   input_tensorc           	         U R                   R                  S:X  a  Ub  US:H  R                  5       (       a  U$ g U R                   R                  S:X  a,  [        U[        R
                  5      (       a  [        U5      nU$ Ub  UR                  5       OSnUb  UR                  OSnU R                   R                  S:X  a5  U(       d.  U(       d'  [        R                  " UUUU R                  S9(       a  g UR                  nUR                  S   n	U(       a  UR                  5       n
O5[        U[        R
                  5      (       a  UR                  S	   OXi-   S-   n
U R                  UU	U
UUUR                  S   S
9nU R                   R                  S:X  aZ  UbW  UR                   R"                  S;   a=  U(       d6  [        R$                  " U5      R&                  n[        R(                  " X5      nU$ )Nflash_attention_2r   flex_attentionr   Fsdpa)r  past_key_values_lengthis_trainingr   r@   )sequence_lengthtarget_lengthrH   r   r   )cudaxpunpu)r%   r   anyr/   rN   ra   r    r$  is_compileabler   _ignore_causal_mask_sdpar   rH   rK   get_max_cache_shape5_prepare_4d_causal_attention_mask_with_cache_positionr:   r)   finfomin_unmask_unattended)r9   r   r-  r   r   r   r'  using_compilable_cacherH   r4  r5  r   	min_dtypes                r<   r&  "PersimmonModel._update_causal_mask  s    ;;++/BB)~/D.I.I.K.K%%;;++/??.%,,77!<^!L!!
 @O?Z?99;`aCRC^!?!?di ;;++v5>T]n%>>*'7 MM	 ""&,,Q/!+??AM nell;; $$R(%7!;  PP+')#))!, Q 
 KK,,6*%%**.DD%
 E*..I0CCK[Kr>   r4  r5  rH   r   c                    U b  U R                  5       S:X  a  U nU$ [        R                  " U5      R                  n[        R                  " X4XUR
                  S9nUS:w  a  [        R                  " USS9nU[        R                  " X$R
                  S9UR                  SS5      :  -  nUSSSS2SS24   R                  USSS5      nU b  UR                  5       nU R                  S   n	USS2SS2SS2SU	24   U SS2SSSS24   R                  UR
                  5      -   n
U
S:H  n
USS2SS2SS2SU	24   R                  X5      USS2SS2SS2SU	24'   U$ )	a  
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

Args:
    attention_mask (`torch.Tensor`):
        A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
        `(batch_size, 1, query_length, key_value_length)`.
    sequence_length (`int`):
        The sequence length being processed.
    target_length (`int`):
        The target length: when generating with static cache, the mask should be as long as the static cache,
        to account for the 0 padding, the part of the cache that is not filled yet.
    dtype (`torch.dtype`):
        The dtype to use for the 4D attention mask.
    cache_position (`torch.Tensor`):
        Indices depicting the position of the input sequence tokens in the sequence.
    batch_size (`torch.Tensor`):
        Batch size.
N   )
fill_valuerH   r:   r   )diagonalr!  r@   r   )rG   rN   r>  r?  fullr:   triur%  r   rJ   clonerK   rL   masked_fill)r   r4  r5  rH   r   r   r   r   rB  mask_lengthpadding_masks              r<   r=  DPersimmonModel._prepare_4d_causal_attention_mask_with_cache_position\  s}   > %.*<*<*>!*C(K* ' E*..I** 0Y\j\q\qK !##jjqA5<<>S>STWeWmWmnprsWtttK%dD!Q&67>>z1bRTUK))//1,2226*1aL[L+@ANSTVZ\`bcScDdDgDg&&E    ,q05@Aq,;,AV5W5c5c 6Aq!\k\12 r>   )r  r  r  r  r  r   r  )	NNNNNNNNN)F)r]   r^   r_   r`   r   r   r-   r   r   r   rN   r   ra   r	   r   r   r   r   r   rZ   r   r&  staticmethodr   rH   r=  rd   re   rf   s   @r<   r  r    s    "  151537+/59$(,0/359X
E,,-X
 !.X
 u//0	X

 "%X
   1 12X
 D>X
 $D>X
 'tnX
 !!1!12X
 -.X
 
!X
  X
B #(BellK78B llB 	B
 B  BH 444 4 {{	4
 4 4 4r>   r  c                   h  ^  \ rS rSrS/rU 4S jr\\           SS\\	R                     S\\	R                     S\\	R                     S\\   S\\	R                     S	\\	R                     S
\\   S\\   S\\   S\\	R                     S\\\	R                  4   S\4S jj5       5       rSrU =r$ )PersimmonForCausalLMi  zlm_head.weightc                    > [         TU ]  U5        [        U5      U l        UR                  U l        [
        R                  " UR                  UR                  SS9U l        U R                  5         g )NFr   )
r,   r-   r  r   r  r   rw   rx   lm_headr  r~   s     r<   r-   PersimmonForCausalLM.__init__  sU     #F+
 ++yy!3!3V5F5FUS 	r>   r  r   rU   r   r  labelsr   r   r  r   logits_to_keepr   c                    Ub  UOU R                   R                  nU	b  U	OU R                   R                  n	U R                  " SUUUUUUUU	U
S.	UD6nUR                  n[        U[        5      (       a  [        U* S5      OUnU R                  USS2USS24   5      nSnUb*  U R                  " UU4SU R                   R                  0UD6n[        UUUR                  UR                  UR                  S9$ )u  
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
    Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
    config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
    (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

Example:

```python
>>> from transformers import AutoTokenizer, PersimmonForCausalLM

>>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base")
>>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")

>>> prompt = "human: Hey, what should I eat for dinner?"
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
```N)	r  r   rU   r   r  r   r   r  r   r  )losslogitsr   r   r#  r   )r%   r   r  r   r"  r/   r   slicerS  loss_functionr  r   r   r   r#  )r9   r  r   rU   r   r  rU  r   r   r  r   rV  r   r   r   slice_indicesrY  rX  s                     r<   rZ   PersimmonForCausalLM.forward  s)   P 2C1N-TXT_T_TqTq$8$D $++JjJj 	
 ,0:: ,
)%+'/!5),
 ,
  118B>SV8W8W~ot4]kmA}a,?@A%%  ;;11 	D &#33!//))
 	
r>   )rS  r   r  )NNNNNNNNNNr   )r]   r^   r_   r`   _tied_weights_keysr-   r   r   r   rN   r   ra   r	   r   r   r   r   r   rZ   rd   re   rf   s   @r<   rQ  rQ    s2   *+  151537+/59-1$(,0/35934M
E,,-M
 !.M
 u//0	M

 "%M
   1 12M
 ))*M
 D>M
 $D>M
 'tnM
 !!1!12M
 c5<</0M
 
 M
  M
r>   rQ  c                       \ rS rSrSrg)"PersimmonForSequenceClassificationi  r   Nr]   r^   r_   r`   rd   r   r>   r<   r`  r`    s    fir>   r`  c                       \ rS rSrSrg)PersimmonForTokenClassificationi  r   Nra  r   r>   r<   rc  rc    s    `cr>   rc  )rQ  r  r   r`  rc  )Nr   )r   )Ar   typingr   r   r   rN   r   activationsr   cache_utilsr	   r
   
generationr   modeling_attn_mask_utilsr   modeling_flash_attention_utilsr   modeling_layersr   r   r   modeling_outputsr   r   modeling_rope_utilsr   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   utils.deprecationr   configuration_persimmonr   !torch.nn.attention.flex_attentionr   integrations.flex_attentionr    
get_loggerr]   r   Moduler"   rj   rr   rt   ra   rI   r   r   r   r   r  rQ  r`  rc  __all__r   r>   r<   <module>rw     s  (  , ,   ! . ) > B 
 L F & \ \ 0 4  !!;J 
		H	%!<ryy !<J(8299 * %II%<<% 
% <<	%
 U\\*% % %.H) H)VN6 Nb % % %6 p- p pf\
3_ \
~ j)IKc i d&CE] cr>   