
    cCiە                     D   S r SSKrSSKJr  SSKJr  SSKJrJ	r	  SSK
rSSKrSSKJr  SSKJr  SS	KJr  SS
KJr  SSKJrJr  SSKJrJr  SSKJr  SSKJrJr  SSKJ r J!r!J"r"J#r#  SSK$J%r%J&r&  SSK'J(r(J)r)  SSK*J+r+  \#RX                  " \-5      r.\\"" SS9 " S S\ 5      5       5       r/\\"" SS9 " S S\ 5      5       5       r0S r1 " S S\Rd                  5      r3 " S S\Rd                  5      r4 SCS \Rd                  S!\Rj                  S"\Rj                  S#\Rj                  S$\	\Rj                     S%\6S&\64S' jjr7 " S( S)\Rd                  5      r8 " S* S+\Rd                  5      r9 " S, S-\Rd                  5      r: " S. S/\Rd                  5      r; " S0 S1\Rd                  5      r< " S2 S3\5      r= " S4 S5\Rd                  5      r>\" " S6 S7\5      5       r?\" " S8 S9\?5      5       r@ " S: S;\Rd                  5      rA\"" S<S9 " S= S>\?5      5       rB\"" S?S9 " S@ SA\?5      5       rC/ SBQrDg)Dz,PyTorch VideoMAE (masked autoencoder) model.    N)deepcopy)	dataclass)CallableOptional)nn)MSELoss   )ACT2FN)GradientCheckpointingLayer)BaseModelOutputImageClassifierOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack) find_pruneable_heads_and_indicesprune_linear_layer)ModelOutputTransformersKwargsauto_docstringlogging)IMAGENET_DEFAULT_MEANIMAGENET_DEFAULT_STD)can_return_tuplecheck_model_inputs   )VideoMAEConfigz[
    Class for VideoMAEDecoder's outputs, with potential hidden states and attentions.
    )custom_introc                       \ rS rSr% SrSr\\R                     \	S'   Sr
\\\R                        \	S'   Sr\\\R                        \	S'   Srg)VideoMAEDecoderOutput*   zx
logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
    Pixel reconstruction logits.
Nlogitshidden_states
attentions )__name__
__module____qualname____firstlineno____doc__r!   r   torchFloatTensor__annotations__r"   tupler#   __static_attributes__r$       h/home/james-whalen/.local/lib/python3.13/site-packages/transformers/models/videomae/modeling_videomae.pyr   r   *   sR    
 +/FHU&&'.8<M8E%"3"345<59Ju00129r/   r   zb
    Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.
    c                       \ rS rSr% SrSr\\R                     \	S'   Sr
\\R                     \	S'   Sr\\\R                        \	S'   Sr\\\R                        \	S'   Srg)	VideoMAEForPreTrainingOutput;   z
loss (`torch.FloatTensor` of shape `(1,)`):
    Pixel reconstruction loss.
logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
    Pixel reconstruction logits.
Nlossr!   r"   r#   r$   )r%   r&   r'   r(   r)   r4   r   r*   r+   r,   r!   r"   r-   r#   r.   r$   r/   r0   r2   r2   ;   sg     )-D(5$$
%,*.FHU&&'.8<M8E%"3"345<59Ju00129r/   r2   c                 v  ^ U4S jn[         R                  " [        U 5       Vs/ s H
  o2" U5      PM     sn5      n[         R                  " USS2SSS24   5      USS2SSS24'   [         R                  " USS2SSS24   5      USS2SSS24'   [
        R                  " U5      R                  S5      $ s  snf )z Sinusoid position encoding tablec           
         > [        T5       Vs/ s H%  o[        R                  " SSUS-  -  T-  5      -  PM'     sn$ s  snf )Ni'     )rangenppower)positionhid_jd_hids     r0   get_position_angle_vec;get_sinusoid_encoding_table.<locals>.get_position_angle_vecU   s?    RWX]R^_R^288E1
+;e+CDDR^___s   ,>Nr   r7   r   )r9   arrayr8   sincosr*   r+   	unsqueeze)
n_positionr=   r>   pos_isinusoid_tables    `   r0   get_sinusoid_encoding_tablerG   Q   s    ` XX%PZJ[\J[5e<J[\]N ff^Aqt!tG%<=N1add7 ff^Aqt!tG%<=N1add7^,66q99	 ]s   B6c                   2   ^  \ rS rSrSrU 4S jrS rSrU =r$ )VideoMAEEmbeddings_   z/
Construct the patch and position embeddings.

c                    > [         TU ]  5         [        U5      U l        U R                  R                  U l        [        U R                  UR                  5      U l        Xl        g N)	super__init__VideoMAEPatchEmbeddingspatch_embeddingsnum_patchesrG   hidden_sizeposition_embeddingsconfigselfrT   	__class__s     r0   rN   VideoMAEEmbeddings.__init__e   sP     7 ?00<<#>t?O?OQWQcQc#d r/   c                    U R                  U5      nX0R                  R                  5       R                  U5      R	                  UR
                  SS9-   nUb'  UR                  u  pEnX2)    nUR                  USU5      nU$ )NTdevicecopy)rP   rS   detachtype_astor[   shapereshape)rV   pixel_valuesbool_masked_pos
embeddings
batch_size_num_channelss          r0   forwardVideoMAEEmbeddings.forwardn   s    **<8
  ":":"A"A"C"K"KJ"W"Z"Z$$4 #[ #
 


 &*4*:*:'J<#$45J#++JLIJr/   )rT   rQ   rP   rS   	r%   r&   r'   r(   r)   rN   ri   r.   __classcell__rW   s   @r0   rI   rI   _   s    
 r/   rI   c                   2   ^  \ rS rSrSrU 4S jrS rSrU =r$ )rO      ac  
Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels,
height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width //
patch_size).

c           	        > [         T	U ]  5         UR                  nUR                  nUR                  nUR
                  nUR                  nUR                  n[        U[        R                  R                  5      (       a  UOX"4n[        U[        R                  R                  5      (       a  UOX34nX l        X0l        [        U5      U l        US   US   -  US   US   -  -  X`R                  -  -  nX@l        Xl        [        R                  " UUU R                  US   US   4U R                  US   US   4S9U l        g )Nr   r   )in_channelsout_channelskernel_sizestride)rM   rN   
image_size
patch_sizerh   rR   
num_framestubelet_size
isinstancecollectionsabcIterableintrQ   r   Conv3d
projection)
rV   rT   ru   rv   rh   rR   rw   rx   rQ   rW   s
            r0   rN    VideoMAEPatchEmbeddings.__init__   s3   &&
&&
**((&&
**#-j+//:R:R#S#SZZdYq
#-j+//:R:R#S#SZZdYq
$$-]jm+
1A0NOS]ararSrs 	 )&))$$**JqM:a=I%%z!}jmD	
r/   c                    UR                   u  p#pEnX@R                  :w  a  [        S5      eXPR                  S   :w  d  X`R                  S   :w  a2  [        SU SU SU R                  S    SU R                  S    S3	5      eUR	                  SSSS	S
5      nU R                  U5      R                  S5      R                  SS5      nU$ )NzeMake sure that the channel dimension of the pixel values match with the one set in the configuration.r   r   zInput image size (*z) doesn't match model (z).r7   r	      )ra   rh   
ValueErrorru   permuter   flatten	transpose)rV   rc   rf   rw   rh   heightwidthre   s           r0   ri   VideoMAEPatchEmbeddings.forward   s    >J>P>P;
e,,,w  __Q''5OOA4F+F$VHAeW4KDOO\]L^K__`aeapapqras`ttvw  $++Aq!Q:__\2::1=GG1M
r/   )ru   rh   rQ   rv   r   rx   rk   rm   s   @r0   rO   rO      s    
6 r/   rO   modulequerykeyvalueattention_maskscalingdropoutc                    [         R                  " XR                  SS5      5      U-  n[        R                  R                  US[         R                  S9R                  UR                  5      n[        R                  R                  XU R                  S9nUb  X-  n[         R                  " X5      n	U	R                  SS5      R                  5       n	X4$ )Nr]   )dimdtype)ptrainingr   r7   )r*   matmulr   r   
functionalsoftmaxfloat32r`   r   r   r   
contiguous)
r   r   r   r   r   r   r   kwargsattn_weightsattn_outputs
             r0   eager_attention_forwardr      s     <<}}R'<=GL ==((2U]](SVVW\WbWbcL ==((6??([L !#4,,|3K''1-88:K$$r/   c                      ^  \ rS rSrS\SS4U 4S jjrS	S\\R                     S\	\R                  \R                  4   4S jjr
SrU =r$ )
VideoMAESelfAttention   rT   returnNc                   > [         TU ]  5         UR                  UR                  -  S:w  a7  [	        US5      (       d&  [        SUR                   SUR                   S35      eXl        UR                  U l        [        UR                  UR                  -  5      U l        U R                  U R                  -  U l	        UR                  U l        U R                  S-  U l        SU l        [        R                  " UR                  U R                  SS9U l        [        R                  " UR                  U R                  SS9U l        [        R                  " UR                  U R                  SS9U l        UR&                  (       as  [        R(                  " [*        R,                  " U R                  5      5      U l        [        R(                  " [*        R,                  " U R                  5      5      U l        g S U l        S U l        g )	Nr   embedding_sizezThe hidden size z4 is not a multiple of the number of attention heads .g      Fbias)rM   rN   rR   num_attention_headshasattrr   rT   r}   attention_head_sizeall_head_sizeattention_probs_dropout_probdropout_probr   	is_causalr   Linearr   r   r   qkv_bias	Parameterr*   zerosq_biasv_biasrU   s     r0   rN   VideoMAESelfAttention.__init__   s    : ::a?PVXhHiHi"6#5#5"6 7334A7  #)#=#= #&v'9'9F<V<V'V#W !558P8PP"??//5YYv1143E3EER
99V//1C1C%PYYv1143E3EER
??,,u{{43E3E'FGDK,,u{{43E3E'FGDKDKDKr/   	head_maskc                    UR                   u  p4nU R                  b  [        R                  " U R                  SS9OS n[
        R                  R                  XR                  R                  US9n[
        R                  R                  XR                  R                  U R                  S9n[
        R                  R                  XR                  R                  U R                  S9n	UR                  USU R                  U R                  5      R                  SS5      n
UR                  USU R                  U R                  5      R                  SS5      nU	R                  USU R                  U R                  5      R                  SS5      n[         nU R"                  R$                  S:w  a  [&        U R"                  R$                     nU" U UU
UUU R(                  U R*                  U R,                  (       d  SOU R.                  S	9u  pUR1                  5       S S
 U R2                  4-   nUR5                  U5      nX4$ )NF)requires_grad)inputweightr   r]   r   r7   eager        )r   r   r   r   )ra   r   r*   
zeros_liker   r   r   linearr   r   r   r   viewr   r   r   r   rT   _attn_implementationr   r   r   r   r   sizer   rb   )rV   r"   r   rf   
seq_lengthrg   k_biaskeysvaluesqueries	key_layervalue_layerquery_layerattention_interfacecontext_layerattention_probsnew_context_layer_shapes                    r0   ri   VideoMAESelfAttention.forward   s   $1$7$7!
GK{{G^!!$++UCdh}}##-V\#]%%M**BSBSZ^ZeZe%f--&&]::CTCT[_[f[f&gIIj"d.F.FH`H`akklmopq	kk*b$2J2JDLdLdeoopqstull:r43K3KTMeMefppqrtuv(?;;++w6"9$++:Z:Z"[)<nnLL#}}C$2C2C	*
& #0"4"4"6s";t?Q?Q>S"S%--.EF--r/   )r   r   rT   r   r   r   r   r   r   r   r   r   rL   )r%   r&   r'   r(   r   rN   r   r*   Tensorr-   ri   r.   rl   rm   s   @r0   r   r      sP    ~ $ 4.0F .RWX]XdXdfkfrfrXrRs . .r/   r   c                      ^  \ rS rSrSrS\4U 4S jjrS\R                  S\R                  S\R                  4S jr	S	r
U =r$ )
VideoMAESelfOutputi  z
The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
rT   c                    > [         TU ]  5         [        R                  " UR                  UR                  5      U l        [        R                  " UR                  5      U l        g rL   )	rM   rN   r   r   rR   denseDropouthidden_dropout_probr   rU   s     r0   rN   VideoMAESelfOutput.__init__  sB    YYv1163E3EF
zz&"<"<=r/   r"   input_tensorr   c                 J    U R                  U5      nU R                  U5      nU$ rL   r   r   rV   r"   r   s      r0   ri   VideoMAESelfOutput.forward  s$    

=1]3r/   r   )r%   r&   r'   r(   r)   r   rN   r*   r   ri   r.   rl   rm   s   @r0   r   r     sB    
>~ >
U\\  RWR^R^  r/   r   c                      ^  \ rS rSrS\4U 4S jjrS\\   4S jrSS\	R                  S\\	R                     S\	R                  4S	 jjrS
rU =r$ )VideoMAEAttentioni#  rT   c                    > [         TU ]  5         [        U5      U l        [	        U5      U l        [        5       U l        g rL   )rM   rN   r   	attentionr   outputsetpruned_headsrU   s     r0   rN   VideoMAEAttention.__init__$  s0    .v6(0Er/   headsc                 6   [        U5      S:X  a  g [        XR                  R                  U R                  R                  U R
                  5      u  p[        U R                  R                  U5      U R                  l        [        U R                  R                  U5      U R                  l        [        U R                  R                  U5      U R                  l	        [        U R                  R                  USS9U R                  l        U R                  R                  [        U5      -
  U R                  l        U R                  R                  U R                  R                  -  U R                  l        U R
                  R                  U5      U l        g )Nr   r   r   )lenr   r   r   r   r   r   r   r   r   r   r   r   union)rV   r   indexs      r0   prune_headsVideoMAEAttention.prune_heads*  s   u:?7>>55t~~7Y7Y[_[l[l

  2$..2F2FN/0B0BEJ1$..2F2FN.t{{/@/@%QO .2^^-O-ORUV[R\-\*'+~~'I'IDNNLnLn'n$ --33E:r/   r"   r   r   c                 N    U R                  X5      u  p4U R                  X15      nU$ rL   )r   r   )rV   r"   r   self_attn_outputrg   r   s         r0   ri   VideoMAEAttention.forward<  s(    "nn]F-=r/   )r   r   r   rL   )r%   r&   r'   r(   r   rN   r   r}   r   r*   r   r   ri   r.   rl   rm   s   @r0   r   r   #  sR    "~ ";S ;$U\\ hu||>T `e`l`l  r/   r   c                   j   ^  \ rS rSrS\4U 4S jjrS\R                  S\R                  4S jrSr	U =r
$ )VideoMAEIntermediateiC  rT   c                   > [         TU ]  5         [        R                  " UR                  UR
                  5      U l        [        UR                  [        5      (       a  [        UR                     U l        g UR                  U l        g rL   )rM   rN   r   r   rR   intermediate_sizer   ry   
hidden_actstrr
   intermediate_act_fnrU   s     r0   rN   VideoMAEIntermediate.__init__D  s`    YYv1163K3KL
f''--'-f.?.?'@D$'-'8'8D$r/   r"   r   c                 J    U R                  U5      nU R                  U5      nU$ rL   r   r   )rV   r"   s     r0   ri   VideoMAEIntermediate.forwardL  s&    

=100?r/   r   r%   r&   r'   r(   r   rN   r*   r   ri   r.   rl   rm   s   @r0   r   r   C  s/    9~ 9U\\ ell  r/   r   c                      ^  \ rS rSrS\4U 4S jjrS\R                  S\R                  S\R                  4S jrSr	U =r
$ )	VideoMAEOutputiS  rT   c                    > [         TU ]  5         [        R                  " UR                  UR
                  5      U l        [        R                  " UR                  5      U l	        g rL   )
rM   rN   r   r   r   rR   r   r   r   r   rU   s     r0   rN   VideoMAEOutput.__init__T  sB    YYv779K9KL
zz&"<"<=r/   r"   r   r   c                 R    U R                  U5      nU R                  U5      nX-   nU$ rL   r   r   s      r0   ri   VideoMAEOutput.forwardY  s,    

=1]3%4r/   r   r   rm   s   @r0   r   r   S  s=    >~ >
U\\  RWR^R^  r/   r   c                      ^  \ rS rSrSrS\4U 4S jjrS
S\R                  S\	\R                     S\R                  4S jjr
S	rU =r$ )VideoMAELayeria  z?This corresponds to the Block class in the timm implementation.rT   c                 j  > [         TU ]  5         UR                  U l        SU l        [	        U5      U l        [        U5      U l        [        U5      U l	        [        R                  " UR                  UR                  S9U l        [        R                  " UR                  UR                  S9U l        g )Nr   eps)rM   rN   chunk_size_feed_forwardseq_len_dimr   r   r   intermediater   r   r   	LayerNormrR   layer_norm_epslayernorm_beforelayernorm_afterrU   s     r0   rN   VideoMAELayer.__init__d  s    '-'E'E$*6208$V, "V-?-?VEZEZ [!||F,>,>FDYDYZr/   r"   r   r   c                     U R                  U5      nU R                  X25      nXA-   nU R                  U5      nU R                  U5      nU R	                  XQ5      nU$ rL   )r  r   r  r
  r   )rV   r"   r   hidden_states_normattention_outputlayer_outputs         r0   ri   VideoMAELayer.forwardn  se    !22=A>>*<H )8 ++M:((6 {{<?r/   )r   r  r
  r  r  r   r	  rL   )r%   r&   r'   r(   r)   r   rN   r*   r   r   ri   r.   rl   rm   s   @r0   r  r  a  sG    I[~ [U\\ hu||>T `e`l`l  r/   r  c                   x   ^  \ rS rSrS\4U 4S jjrS	S\R                  S\\R                     S\	4S jjr
SrU =r$ )
VideoMAEEncoderi  rT   c                    > [         TU ]  5         Xl        [        R                  " [        UR                  5       Vs/ s H  n[        U5      PM     sn5      U l        SU l	        g s  snf )NF)
rM   rN   rT   r   
ModuleListr8   num_hidden_layersr  layergradient_checkpointing)rV   rT   rg   rW   s      r0   rN   VideoMAEEncoder.__init__  sR    ]]5IaIaCb#cCbaM&$9Cb#cd
&+# $ds   A&r"   r   r   c                 r    [        U R                  5       H  u  p4Ub  X#   OS nU" X5      nM     [        US9$ )Nlast_hidden_state)	enumerater  r   )rV   r"   r   ilayer_modulelayer_head_masks         r0   ri   VideoMAEEncoder.forward  s<    (4OA.7.CilO(HM  5 ??r/   )rT   r  r  rL   )r%   r&   r'   r(   r   rN   r*   r   r   r   ri   r.   rl   rm   s   @r0   r  r    sA    ,~ ,@U\\ @hu||>T @`o @ @r/   r  c                   T    \ rS rSr% \\S'   SrSrSrSS/r	Sr
SrSrSr\\S.rS	 rS
rg)VideoMAEPreTrainedModeli  rT   videomaerc   TrI   r  )r"   r#   c                 
   [        U[        R                  [        R                  45      (       ak  UR                  R
                  R                  SU R                  R                  S9  UR                  b%  UR                  R
                  R                  5         gg[        U[        R                  5      (       aJ  UR                  R
                  R                  5         UR                  R
                  R                  S5        gg)zInitialize the weightsr   )meanstdNg      ?)ry   r   r   r~   r   datanormal_rT   initializer_ranger   zero_r  fill_)rV   r   s     r0   _init_weights%VideoMAEPreTrainedModel._init_weights  s    fryy"))455 MM&&CT[[5R5R&S{{&  &&( '--KK""$MM$$S) .r/   r$   N)r%   r&   r'   r(   r   r,   base_model_prefixmain_input_namesupports_gradient_checkpointing_no_split_modules_supports_sdpa_supports_flash_attn_supports_flex_attn_supports_attention_backendr  r   _can_record_outputsr0  r.   r$   r/   r0   r&  r&    sN    "$O&*#-?N"&&+

*r/   r&  c                      ^  \ rS rSrU 4S jrS rS r\" SS9\  SS\	R                  S\\	R                     S	\\	R                     S
\\   S\4
S jj5       5       rSrU =r$ )VideoMAEModeli  c                   > [         TU ]  U5        Xl        [        U5      U l        [        U5      U l        UR                  (       a  S U l        O.[        R                  " UR                  UR                  S9U l        U R                  5         g )Nr  )rM   rN   rT   rI   re   r  encoderuse_mean_pooling	layernormr   r  rR   r  	post_initrU   s     r0   rN   VideoMAEModel.__init__  sg     ,V4&v.""!DN\\&*<*<&BWBWXDN 	r/   c                 .    U R                   R                  $ rL   )re   rP   )rV   s    r0   get_input_embeddings"VideoMAEModel.get_input_embeddings  s    ///r/   c                     UR                  5        H7  u  p#U R                  R                  U   R                  R	                  U5        M9     g)z
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
N)itemsr>  r  r   r   )rV   heads_to_pruner  r   s       r0   _prune_headsVideoMAEModel._prune_heads  s<    
 +002LELLu%//;;EB 3r/   F)tie_last_hidden_statesrc   rd   r   r   r   c                     U R                  X0R                  R                  5      nU R                  X5      nU R	                  XSS9nUR
                  nU R                  b  U R                  U5      n[        US9$ )a  
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
    Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
    batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence
    length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`.

Examples:

```python
>>> import av
>>> import numpy as np

>>> from transformers import AutoImageProcessor, VideoMAEModel
>>> from huggingface_hub import hf_hub_download

>>> np.random.seed(0)


>>> def read_video_pyav(container, indices):
...     '''
...     Decode the video with PyAV decoder.
...     Args:
...         container (`av.container.input.InputContainer`): PyAV container.
...         indices (`list[int]`): List of frame indices to decode.
...     Returns:
...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
...     '''
...     frames = []
...     container.seek(0)
...     start_index = indices[0]
...     end_index = indices[-1]
...     for i, frame in enumerate(container.decode(video=0)):
...         if i > end_index:
...             break
...         if i >= start_index and i in indices:
...             frames.append(frame)
...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
...     '''
...     Sample a given number of frame indices from the video.
...     Args:
...         clip_len (`int`): Total number of frames to sample.
...         frame_sample_rate (`int`): Sample every n-th frame.
...         seg_len (`int`): Maximum allowed index of sample's last frame.
...     Returns:
...         indices (`list[int]`): List of sampled frame indices
...     '''
...     converted_len = int(clip_len * frame_sample_rate)
...     end_idx = np.random.randint(converted_len, seg_len)
...     start_idx = end_idx - converted_len
...     indices = np.linspace(start_idx, end_idx, num=clip_len)
...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
...     return indices


>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)

>>> # sample 16 frames
>>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)

>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
>>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base")

>>> # prepare video for the model
>>> inputs = image_processor(list(video), return_tensors="pt")

>>> # forward pass
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 1568, 768]
```r   r  )get_head_maskrT   r  re   r>  r  r@  r   )rV   rc   rd   r   r   embedding_outputencoder_outputssequence_outputs           r0   ri   VideoMAEModel.forward  sm    | &&y++2O2OP	??<I+/<<8H<+^);;>>%"nn_=OAAr/   )rT   re   r>  r@  )NN)r%   r&   r'   r(   rN   rD  rI  r   r   r*   r+   r   
BoolTensorr   r   r   r   ri   r.   rl   rm   s   @r0   r<  r<    s    0C u5 7;,0	eB''eB "%"2"23eB ELL)	eB
 +,eB 
eB  6eBr/   r<  c                   V   ^  \ rS rSrS\4U 4S jjrS\R                  S\4S jr	Sr
U =r$ )VideoMAEDecoderi2  rT   c                   > [         TU ]  5         UR                  UR                  -  UR                  S-  -  n[        U5      nUR                  Ul        UR                  Ul	        UR                  Ul        UR                  Ul        [        R                  " [!        UR                  5       Vs/ s H  n[#        U5      PM     sn5      U l        [        R&                  " UR                  5      U l        US:  a!  [        R*                  " UR                  U5      O[        R,                  " 5       U l        SU l        X0l        g s  snf )Nr7   r   F)rM   rN   rh   rx   rv   r   decoder_hidden_sizerR   decoder_num_hidden_layersr  decoder_num_attention_headsr   decoder_intermediate_sizer   r   r  r8   r  decoder_layersr  normr   Identityheadr  rT   )rV   rT   decoder_num_labelsdecoder_configrg   rW   s        r0   rN   VideoMAEDecoder.__init__3  s
   #0063F3FFIZIZ\]I]]!&)%+%?%?"+1+K+K(-3-O-O*+1+K+K( mm49&:Z:Z4[\4[q]>*4[\
 LL!;!;<	I[^_I_BIIf002DEegepeper 		 ',#$ ]s   /Er"   return_token_numc                     U R                    H
  nU" US S9nM     US:  a  US S 2U* S 24   nU R                  U5      nU R                  U5      n[        US9$ )NrM  r   )r!   )r[  r\  r^  r   )rV   r"   rb  r"  r!   s        r0   ri   VideoMAEDecoder.forwardI  sg     //L($GM 0 a)!.>->-?*?@M 		-0=)$F33r/   )rT   r[  r  r^  r\  )r%   r&   r'   r(   r   rN   r*   r   r}   ri   r.   rl   rm   s   @r0   rU  rU  2  s+    %~ %,4U\\ 4S 4 4r/   rU  zb
    The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.
    c                      ^  \ rS rSrU 4S jr\\ S
S\R                  S\R                  S\
\R                     S\\   S\4
S jj5       5       rS	rU =r$ )VideoMAEForPreTrainingiX  c                   > [         TU ]  U5        Xl        [        U5      U l        [
        R                  " UR                  UR                  SS9U l	        [
        R                  " [        R                  " SSUR                  5      5      U l        [        U R                  R                  R                   UR                  5      U l        [%        U5      U l        U R)                  5         g )NFr   r   )rM   rN   rT   r<  r'  r   r   rR   rW  encoder_to_decoderr   r*   r   
mask_tokenrG   re   rQ   rS   rU  decoderrA  rU   s     r0   rN   VideoMAEForPreTraining.__init__^  s     %f-"$))F,>,>@Z@Zaf"g,,u{{1a9S9S'TU#>MM$$00&2L2L$
  'v. 	r/   rc   rd   r   r   r   c                    U R                   " U4X#S.UD6nUR                  nU R                  U5      nUR                  u  pxn	Uc  [	        S5      eU R
                  R                  USS5      R                  U5      n
U
R                  5       R                  UR                  SS9n
X)    R                  USU	5      nX   R                  USU	5      n[        R                  " Xk-   U R                  U-   /SS9nU R                  XR                  S   5      nUR                   nSn[        R"                  " 5          U R$                  R&                  S	:w  a  UnOUR                  nUR(                  n[        R*                  " [,        5      R                  UUS
9SSSS2SS4   n[        R*                  " [.        5      R                  UUS
9SSSS2SS4   nUU-  U-   nUR                  u  nnn	nnU R$                  R0                  U R$                  R2                  nnU R$                  R4                  (       a  UR7                  UUU-  UU	UU-  UUU-  U5      nUR9                  SSSSSSSS	5      R;                  5       nUR7                  UUU-  U-  U-  U-  U-  UU-  U-  U	5      nUUR=                  SSS9-
  UR?                  SSSS9RA                  5       S-   -  nUR7                  UUU-  U-  U-  U-  U-  UU-  U-  U	-  5      nOU R$                  R&                  S	:w  a  [	        S5      eUR7                  UUU-  UU	UU-  UUU-  U5      nUR9                  SSSSSSSS	5      R;                  5       nUR7                  UUU-  U-  U-  U-  U-  UU-  U-  U	-  5      nUR                  u  pxn	UU   R                  USU	5      nSSS5        [C        5       nU" UW5      n[E        UUURF                  URH                  S9$ ! , (       d  f       NA= f)ae  
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
    Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
    batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) *
    (image_size // patch_size) ** 2`.

Examples:
```python
>>> from transformers import AutoImageProcessor, VideoMAEForPreTraining
>>> import numpy as np
>>> import torch

>>> num_frames = 16
>>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224)))

>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
>>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base")

>>> pixel_values = image_processor(video, return_tensors="pt").pixel_values

>>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
>>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
>>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()

>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss = outputs.loss
```)rd   r   Nz!One must provided a boolean mask r]   TrZ   r   r   r	   )r[   r   r   r      r7         r   )r   keepdim)r   unbiasedrp  gư>zQCan't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False.r4   r!   r"   r#   )%r'  r  rh  ra   r   rS   expandr_   r^   r`   r[   rb   r*   catri  rj  r!   no_gradrT   rh   r   	as_tensorr   r   rx   rv   norm_pix_lossr   r   r   r)  varsqrtr   r2   r"   r#   )rV   rc   rd   r   r   outputsrQ  rf   rg   rh   expanded_position_embeddingspos_emb_visiblepos_emb_maskx_fulldecoder_outputsr!   r4   framesr[   r   r)  r*  timer   r   rx   rv   frames_normvideos_patchlabelsloss_fcts                                  r0   ri   VideoMAEForPreTraining.forwardo  s   H $(==$
*9$
RX$
 "3311/B '6&;&;#
| "@AA'+'?'?'F'FzSUWY'Z'b'bco'p$'C'J'J'L'O'OWcWjWjqu'O'v$67GHPPQ[]_amn3DLLZY[]ij O=tQ]?]^def 26fFXFXYZF[1\ '']]_{{''1,% &,,$**'<=@@V[@\]acgijlprv]vwoo&:;>>fTY>Z[_aeghjnpt[tu%+d2<BLL9JlFE'+{{'?'?AWAW*L{{((L(  j(Z'	  1aAq!Q?JJLL(61Z?%G:U :-
: 	  &D(IIJJ2dJCHHJTQ  +//L(61Z?%G:U :-
:\I  ;;++q0$k   L(  j(Z'	  1aAq!Q?JJL%{{L(61Z?%G:U :-
:\I  +7*<*<'J<!/2:::r<XFQ T 9'+!//))	
 	
[ _s   6J	O::
P)rT   rj  rh  ri  rS   r'  rL   )r%   r&   r'   r(   rN   r   r   r*   r+   rS  r   r   r   r   r2   ri   r.   rl   rm   s   @r0   rf  rf  X  s{    " 
 -1	O
''O
 ))O
 ELL)	O

 +,O
 
&O
  O
r/   rf  z
    VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden
    states of all tokens) e.g. for ImageNet.
    c                      ^  \ rS rSrU 4S jr\\   S
S\\R                     S\\R                     S\\R                     S\
\   S\4
S jj5       5       rS	rU =r$ )VideoMAEForVideoClassificationi  c                   > [         TU ]  U5        UR                  U l        [        U5      U l        UR
                  (       a   [        R                  " UR                  5      OS U l	        UR                  S:  a+  [        R                  " UR                  UR                  5      O[        R                  " 5       U l        U R                  5         g )Nr   )rM   rN   
num_labelsr<  r'  r?  r   r  rR   fc_normr   r]  
classifierrA  rU   s     r0   rN   'VideoMAEForVideoClassification.__init__
  s      ++%f- <B;R;Rr||F$6$67X\NTN_N_bcNc"))F$6$68I8IJikititiv 	r/   rc   r   r  r   r   c                 ^   U R                   " U4SU0UD6nUR                  nU R                  b#  UR                  S5      nU R                  U5      nO	USS2S4   nU R	                  U5      nSn	Ub  U R
                  " X8U R                  40 UD6n	[        U	UUR                  UR                  S9$ )a  
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
    Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
    config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
    `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

Examples:

```python
>>> import av
>>> import torch
>>> import numpy as np

>>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification
>>> from huggingface_hub import hf_hub_download

>>> np.random.seed(0)


>>> def read_video_pyav(container, indices):
...     '''
...     Decode the video with PyAV decoder.
...     Args:
...         container (`av.container.input.InputContainer`): PyAV container.
...         indices (`list[int]`): List of frame indices to decode.
...     Returns:
...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
...     '''
...     frames = []
...     container.seek(0)
...     start_index = indices[0]
...     end_index = indices[-1]
...     for i, frame in enumerate(container.decode(video=0)):
...         if i > end_index:
...             break
...         if i >= start_index and i in indices:
...             frames.append(frame)
...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
...     '''
...     Sample a given number of frame indices from the video.
...     Args:
...         clip_len (`int`): Total number of frames to sample.
...         frame_sample_rate (`int`): Sample every n-th frame.
...         seg_len (`int`): Maximum allowed index of sample's last frame.
...     Returns:
...         indices (`list[int]`): List of sampled frame indices
...     '''
...     converted_len = int(clip_len * frame_sample_rate)
...     end_idx = np.random.randint(converted_len, seg_len)
...     start_idx = end_idx - converted_len
...     indices = np.linspace(start_idx, end_idx, num=clip_len)
...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
...     return indices


>>> # video clip consists of 300 frames (10 seconds at 30 FPS)
>>> file_path = hf_hub_download(
...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)

>>> # sample 16 frames
>>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
>>> video = read_video_pyav(container, indices)

>>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
>>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

>>> inputs = image_processor(list(video), return_tensors="pt")

>>> with torch.no_grad():
...     outputs = model(**inputs)
...     logits = outputs.logits

>>> # model predicts one of the 400 Kinetics-400 classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
eating spaghetti
```r   Nr   r   rr  )
r'  r  r  r)  r  loss_functionrT   r   r"   r#   )
rV   rc   r   r  r   rz  rQ  r   r!   r4   s
             r0   ri   &VideoMAEForVideoClassification.forward  s    x $(==#]#]V\#]!33<<#$))!,F\\&)F$QT*F(%%fdkkLVLD$!//))	
 	
r/   )r  r  r  r'  )NNN)r%   r&   r'   r(   rN   r   r   r   r*   r   r   r   r   ri   r.   rl   rm   s   @r0   r  r    s      04,0)-	n
u||,n
 ELL)n
 &	n

 +,n
 
n
  n
r/   r  )rf  r<  r&  r  )r   )Er)   collections.abcrz   r\   r   dataclassesr   typingr   r   numpyr9   r*   r   torch.nnr   activationsr
   modeling_layersr   modeling_outputsr   r   modeling_utilsr   r   processing_utilsr   pytorch_utilsr   r   utilsr   r   r   r   utils.constantsr   r   utils.genericr   r   configuration_videomaer   
get_loggerr%   loggerr   r2   rG   ModulerI   rO   r   floatr   r   r   r   r   r   r  r  r&  r<  rU  rf  r  __all__r$   r/   r0   <module>r     sW   3   ! %     ! 9 F F & Q M M J A 2 
		H	% 
:K : : 
:; : : : B2bii 2z %II%<<% 
% <<	%
 U\\*% % %<9.BII 9.z $		 @299  
RYY 
. >@bii @ *o * *8 BB+ BB BBJ#4bii #4L 
c
4 c

c
L ~
%< ~
~
B sr/   