
    cCi\                     6   S r SSKJrJr  SSKrSSKJrJrJ	r	  SSK
JrJrJrJr  SSKJrJrJrJrJrJrJrJrJr  SSKJrJrJrJrJrJr  SS	K J!r!  \" 5       (       a  SSK"r"\" 5       (       a  SSK#r#\RH                  " \%5      r&\!" S
S9 " S S\5      5       r'S/r(g)z$Image processor class for MobileViT.    )OptionalUnionN   )BaseImageProcessorBatchFeatureget_size_dict)flip_channel_orderget_resize_output_image_sizeresizeto_channel_dimension_format)	ChannelDimension
ImageInputPILImageResamplinginfer_channel_dimension_formatis_scaled_imagemake_flat_list_of_imagesto_numpy_arrayvalid_imagesvalidate_preprocess_arguments)
TensorTypefilter_out_non_signature_kwargsis_torch_availableis_torch_tensoris_vision_availablelogging)requires)vision)backendsc            !          ^  \ rS rSrSrS/rSS\R                  SSSSSS4	S\S	\	\
\\4      S
\S\S\\\4   S\S\	\
\\4      S\S\SS4U 4S jjjr\R                  SS4S\R"                  S	\
\\4   S
\S\	\\\4      S\	\\\4      S\R"                  4S jjr  S&S\R"                  S\	\\\4      S\	\\\4      S\R"                  4S jjrS\S\R"                  4S jrS'U 4S jjr     S(S\S\S\S\S\S\S	\	\
\\4      S
\	\   S\	\   S\	\
\\4      S\	\\\4      4S jjr          S)S\S\	\   S	\	\
\\4      S
\	\   S\	\   S\	\   S\	\   S\	\
\\4      S\	\   S\	\\\4      S\	\\\4      S\R"                  4S jjr      S*S\S\	\   S\	\   S	\	\
\\4      S\	\   S\	\
\\4      S\	\\\4      S\R"                  4S jjr\" 5       SSSSSSSSSSS\R8                  S4S\S \	\   S\	\   S	\	\
\\4      S
\	\   S\	\   S\	\   S\	\   S\	\
\\4      S\	\   S\	\   S!\	\\\4      S\S\	\\\4      S\R>                  R>                  4S" jj5       r S'S#\	\!\"      4S$ jjr#S%r$U =r%$ )+MobileViTImageProcessor7   ad	  
Constructs a MobileViT image processor.

Args:
    do_resize (`bool`, *optional*, defaults to `True`):
        Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
        `do_resize` parameter in the `preprocess` method.
    size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
        Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
        `preprocess` method.
    resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
        Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
        in the `preprocess` method.
    do_rescale (`bool`, *optional*, defaults to `True`):
        Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
        parameter in the `preprocess` method.
    rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
        Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
        `preprocess` method.
    do_center_crop (`bool`, *optional*, defaults to `True`):
        Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
        image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
        the `preprocess` method.
    crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
        Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
        the `crop_size` parameter in the `preprocess` method.
    do_flip_channel_order (`bool`, *optional*, defaults to `True`):
        Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
        parameter in the `preprocess` method.
    do_reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
        used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
        background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
        `preprocess` method.
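
Example (a minimal sketch using the constructor defaults and random dummy data; real inputs would
typically be `PIL.Image.Image` objects):

```python
>>> import numpy as np
>>> from transformers import MobileViTImageProcessor

>>> image_processor = MobileViTImageProcessor()
>>> image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
>>> inputs = image_processor(image, return_tensors="np")
>>> inputs["pixel_values"].shape  # resized, rescaled, center-cropped, channels-first, RGB -> BGR
(1, 3, 256, 256)
```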
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Optional[dict[str, int]] = None,
        do_flip_channel_order: bool = True,
        do_reduce_labels: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
        self.do_reduce_labels = do_reduce_labels

    def resize(
        self,
        image: np.ndarray,
        size: dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.

Args:
    image (`np.ndarray`):
        Image to resize.
    size (`dict[str, int]`):
        Size of the output image.
    resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
        Resampling filter to use when resizing the image.
    data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format of the image. If not provided, it will be the same as the input image.
    input_data_format (`ChannelDimension` or `str`, *optional*):
        The channel dimension format of the input image. If not provided, it will be inferred.
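
Example (illustrative arithmetic, not part of the original docstring): with `size={"shortest_edge": 256}`,
a 480x640 (height x width) image is resized to 256x341, preserving the aspect ratio; with
`size={"height": 256, "width": 256}` it is resized to exactly 256x256.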
        """
        default_to_square = True
        if "shortest_edge" in size:
            size = size["shortest_edge"]
            default_to_square = False
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")

        output_size = get_resize_output_image_size(
            image, size=size, default_to_square=default_to_square, input_data_format=input_data_format
        )
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def flip_channel_order(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
Flip the color channels from RGB to BGR or vice versa.

Args:
    image (`np.ndarray`):
        The image, represented as a numpy array.
    data_format (`ChannelDimension` or `str`, *optional*):
        The channel dimension format of the image. If not provided, it will be the same as the input image.
    input_data_format (`ChannelDimension` or `str`, *optional*):
        The channel dimension format of the input image. If not provided, it will be inferred.
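
Example (illustrative): a channels-last pixel `[255, 0, 0]` (red in RGB) becomes `[0, 0, 255]`, the same
pixel read in BGR order.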
        """
        return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)

    def reduce_label(self, label: ImageInput) -> np.ndarray:
        label = to_numpy_array(label)
        # Avoid using underflow conversion
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
        return label

    def __call__(self, images, segmentation_maps=None, **kwargs):
        """
Preprocesses a batch of images and optionally segmentation maps.

Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
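
Example (illustrative): `image_processor(image, mask)` is equivalent to
`image_processor.preprocess(image, segmentation_maps=mask)`.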
segmentation_maps)r8   __call__)r:   imagesrQ   r;   r<   s       r=   rR    MobileViTImageProcessor.__call__   s      wV:KVvVVr?   c                     U(       a  U R                  U5      nU(       a  U R                  XXS9nU(       a  U R                  XUS9nU(       a  U R                  XUS9nU(       a  U R	                  XS9nU$ )N)r@   r$   r%   rB   )r@   scalerB   )r@   r$   rB   )rB   )rN   r   rescalecenter_cropr	   )r:   r@   r+   r#   r&   r(   r*   r$   r%   r'   r)   rB   s               r=   _preprocess#MobileViTImageProcessor._preprocess   sv     %%e,EKKeKoELLuVgLhE$$5Te$fE ++E+WEr?   c                     [        U5      nU(       a%  [        U5      (       a  [        R                  S5        Uc  [	        U5      nU R                  USUUUUUUUU	US9n[        XUS9nU$ )zPreprocesses a single image.zIt looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.F)r@   r+   r#   r$   r%   r&   r'   r(   r)   r*   rB   )input_channel_dim)r   r   loggerwarning_oncer   rY   r   )r:   r@   r#   r$   r%   r&   r'   r(   r)   r*   rA   rB   s               r=   _preprocess_image)MobileViTImageProcessor._preprocess_image   s      u%/%00s $ >u E  "!))"7/ ! 
 ,ERcdr?   segmentation_mapc                 D   [        U5      nUR                  S:X  a  SnUS   n[        R                  nOSnUc
  [	        USS9nU R                  UUUU[        R                  SUUSUS9
nU(       a  UR                  S5      nUR                  [        R                  5      nU$ )	zPreprocesses a single mask.   T)N.FrL   )num_channels)
r@   r+   r#   r$   r%   r&   r(   r)   r*   rB   r   )r   ndimr   FIRSTr   rY   r   NEARESTsqueezeastypenpint64)	r:   ra   r+   r#   r$   r(   r)   rB   added_channel_dims	            r=   _preprocess_mask(MobileViTImageProcessor._preprocess_mask  s     **:;  A% $/	: 0 6 6 % ($BCSbc$d!++"-'//)"'/ , 
 /77:+22288<r?   rS   rQ   return_tensorsc                    Ub  UOU R                   nUb  UOU R                  nUb  UOU R                  nUb  UOU R                  nUb  UOU R                  nU
b  U
OU R
                  n
Ub  UOU R                  n[        USS9nU	b  U	OU R                  n	[        U	SS9n	Ub  UOU R                  n[        U5      nUb
  [        USS9n[        U5      n[        U5      (       d  [        S5      eUb  [        U5      (       d  [        S5      e[        UUUU	UUUS	9  U Vs/ s H  nU R                  UUUUUUUU	U
UUS
9PM     nnSU0nUb*  U Vs/ s H  nU R                  UUUUUU	US9PM     nnUUS'   [!        UUS9$ s  snf s  snf )a  
Preprocess an image or batch of images.

Args:
    images (`ImageInput`):
        Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
        passing in images with pixel values between 0 and 1, set `do_rescale=False`.
    segmentation_maps (`ImageInput`, *optional*):
        Segmentation map to preprocess.
    do_resize (`bool`, *optional*, defaults to `self.do_resize`):
        Whether to resize the image.
    size (`dict[str, int]`, *optional*, defaults to `self.size`):
        Size of the image after resizing.
    resample (`int`, *optional*, defaults to `self.resample`):
        Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
        has an effect if `do_resize` is set to `True`.
    do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
        Whether to rescale the image by the specified scale `rescale_factor`.
    rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
        Rescale factor to rescale the image by if `do_rescale` is set to `True`.
    do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
        Whether to center crop the image.
    crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
        Size of the center crop if `do_center_crop` is set to `True`.
    do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
        Whether to flip the channel order of the image.
    do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
        is used for background, and background itself is not included in all classes of a dataset (e.g.
        ADE20k). The background label will be replaced by 255.
    return_tensors (`str` or `TensorType`, *optional*):
        The type of tensors to return. Can be one of:
            - Unset: Return a list of `np.ndarray`.
            - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
            - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
            - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
    data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
        The channel dimension format for the output image. Can be one of:
            - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
    input_data_format (`ChannelDimension` or `str`, *optional*):
        The channel dimension format for the input image. If unset, the channel dimension format is inferred
        from the input image. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
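
Example (a minimal sketch with random dummy data; `do_reduce_labels=True` assumes ADE20k-style masks
where 0 marks the unlabeled background):

```python
>>> import numpy as np
>>> from transformers import MobileViTImageProcessor

>>> image_processor = MobileViTImageProcessor(do_reduce_labels=True)
>>> images = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)]
>>> masks = [np.random.randint(0, 150, (300, 400), dtype=np.uint8)]
>>> batch = image_processor.preprocess(images, segmentation_maps=masks, return_tensors="np")
>>> batch["pixel_values"].shape, batch["labels"].shape
((1, 3, 256, 256), (1, 256, 256))
```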
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_flat_list_of_images(images)

        if segmentation_maps is not None:
            segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or "
                "jax.ndarray."
            )

        if segmentation_maps is not None and not valid_images(segmentation_maps):
            raise ValueError(
                "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, "
                "tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        images = [
            self._preprocess_image(
                image=img,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_center_crop=do_center_crop,
                crop_size=crop_size,
                do_flip_channel_order=do_flip_channel_order,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for img in images
        ]

        data = {"pixel_values": images}

        if segmentation_maps is not None:
            segmentation_maps = [
                self._preprocess_mask(
                    segmentation_map=segmentation_map,
                    do_reduce_labels=do_reduce_labels,
                    do_resize=do_resize,
                    size=size,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    input_data_format=input_data_format,
                )
                for segmentation_map in segmentation_maps
            ]
            data["labels"] = segmentation_maps

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
        """
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.

Args:
    outputs ([`MobileViTForSemanticSegmentation`]):
        Raw outputs of the model.
    target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
        List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
        predictions will not be resized.

Returns:
    semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
    segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
    specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
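
Example (a sketch with random logits standing in for real model outputs; `SimpleNamespace` merely mimics
the model output object):

```python
>>> import torch
>>> from types import SimpleNamespace
>>> from transformers import MobileViTImageProcessor

>>> image_processor = MobileViTImageProcessor()
>>> outputs = SimpleNamespace(logits=torch.randn(2, 21, 32, 32))  # batch of 2, 21 classes
>>> maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512), (300, 400)])
>>> maps[0].shape, maps[1].shape
(torch.Size([512, 512]), torch.Size([300, 400]))
```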
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation


__all__ = ["MobileViTImageProcessor"]