"""Fast Image processor class for DPT."""

import math
from collections.abc import Iterable
from typing import TYPE_CHECKING, Optional, Union

import torch
from torchvision.transforms.v2 import functional as F

from ...image_processing_base import BatchFeature
from ...image_processing_utils_fast import (
    BaseImageProcessorFast,
    DefaultFastImageProcessorKwargs,
    group_images_by_shape,
    reorder_images,
)
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling, SizeDict
from ...utils import TensorType, auto_docstring, requires_backends
from ..beit.image_processing_beit_fast import BeitImageProcessorFast


if TYPE_CHECKING:
    from ...modeling_outputs import DepthEstimatorOutput


def get_resize_output_image_size(
    input_image: "torch.Tensor",
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> SizeDict:
    def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        # Round down if rounding overshoots the allowed maximum.
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        # Round up if rounding undershoots the allowed minimum.
        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    input_height, input_width = input_image.shape[-2:]
    output_height, output_width = output_size

    # Determine the scale factors implied by the requested output size.
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # Scale as little as possible: keep the dimension whose scale is closer to 1.
        if abs(1 - scale_width) < abs(1 - scale_height):
            # Fit width.
            scale_height = scale_width
        else:
            # Fit height.
            scale_width = scale_height

    new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple)

    return SizeDict(height=new_height, width=new_width)
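
# Worked example for the helper above (illustrative only, not executed at
# import time): a 480x640 input resized toward (384, 384) with
# keep_aspect_ratio=True and multiple=32 fits the height, since
# 384/480 = 0.8 is closer to 1 than 384/640 = 0.6, then rounds each side
# to a multiple of 32:
#
#     get_resize_output_image_size(torch.zeros(3, 480, 640), (384, 384), True, 32)
#     # -> SizeDict(height=384, width=512)
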
class DPTFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    """
    ensure_multiple_of (`int`, *optional*, defaults to 1):
        If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
        by `ensure_multiple_of` in `preprocess`.
    size_divisor (`int`, *optional*):
        If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
        DINOv2 paper, which uses the model in combination with DPT.
    keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
        If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
        be overridden by `keep_aspect_ratio` in `preprocess`.
    do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
        is used for background, and background itself is not included in all classes of a dataset (e.g.
        ADE20k). The background label will be replaced by 255.
    """

    ensure_multiple_of: Optional[int]
    size_divisor: Optional[int]
    keep_aspect_ratio: Optional[bool]
    do_reduce_labels: Optional[bool]


@auto_docstring
class DPTImageProcessorFast(BeitImageProcessorFast):
    resample = PILImageResampling.BICUBIC
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {"height": 384, "width": 384}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_pad = False
    rescale_factor = 1 / 255
    ensure_multiple_of = 1
    keep_aspect_ratio = False
    do_reduce_labels = None
    size_divisor = None
    crop_size = None
    do_center_crop = None
    valid_kwargs = DPTFastImageProcessorKwargs
    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"] = None,
        antialias: bool = True,
        ensure_multiple_of: Optional[int] = 1,
        keep_aspect_ratio: bool = False,
    ) -> "torch.Tensor":
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
            antialias (`bool`, *optional*, defaults to `True`):
                Whether to use antialiasing when resizing the image
            ensure_multiple_of (`int`, *optional*):
                If `do_resize` is `True`, the image is resized to a size that is a multiple of this value
            keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
                If `True`, and `do_resize` is `True`, the image is resized to the largest possible size such that
                the aspect ratio is preserved.

        Returns:
            `torch.Tensor`: The resized image.
        """
        if not size.height or not size.width:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size.height, size.width),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return BaseImageProcessorFast.resize(self, image, output_size, interpolation=interpolation, antialias=antialias)
    def pad_image(
        self,
        image: "torch.Tensor",
        size_divisor: int,
    ) -> "torch.Tensor":
        """
        Center pad a batch of images to be a multiple of `size_divisor`.

        Args:
            image (`torch.Tensor`):
                Image to pad. Can be a batch of images of dimensions (N, C, H, W) or a single image of dimensions (C, H, W).
            size_divisor (`int`):
                The width and height of the image will be padded to a multiple of this number.
        """
        height, width = image.shape[-2:]

        def _get_pad(size, size_divisor):
            new_size = math.ceil(size / size_divisor) * size_divisor
            pad_size = new_size - size
            pad_size_left = pad_size // 2
            pad_size_right = pad_size - pad_size_left
            return pad_size_left, pad_size_right

        pad_top, pad_bottom = _get_pad(height, size_divisor)
        pad_left, pad_right = _get_pad(width, size_divisor)
        padding = (pad_left, pad_top, pad_right, pad_bottom)
        return F.pad(image, padding)

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_reduce_labels: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        do_pad: bool,
        keep_aspect_ratio: Optional[bool],
        ensure_multiple_of: Optional[int],
        size_divisor: Optional[int],
        disable_grouping: Optional[bool],
        return_tensors: Optional[Union[str, TensorType]],
        **kwargs,
    ) -> BatchFeature:
        if do_reduce_labels:
            images = self.reduce_label(images)

        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images,
                    size=size,
                    interpolation=interpolation,
                    ensure_multiple_of=ensure_multiple_of,
                    keep_aspect_ratio=keep_aspect_ratio,
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            if do_pad:
                stacked_images = self.pad_image(stacked_images, size_divisor)
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images

        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images

        return BatchFeature(data={"pixel_values": processed_images})
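
    # Shape grouping in `_preprocess` is a batching optimization: tensors with the
    # same (C, H, W) are stacked and transformed together, and `reorder_images`
    # restores the caller's original ordering afterwards. A hypothetical trace:
    #
    #     shapes [(3, 480, 640), (3, 256, 256), (3, 480, 640)]
    #     -> two stacks (of 2 and 1 images), each resized/padded/normalized once
    #     -> reordered back into a list of 3 tensors
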
    def post_process_depth_estimation(
        self,
        outputs: "DepthEstimatorOutput",
        target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
    ) -> list[dict[str, TensorType]]:
        """
        Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
        Only supports PyTorch.

        Args:
            outputs ([`DepthEstimatorOutput`]):
                Raw outputs of the model.
            target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to None, predictions will not be resized.

        Returns:
            `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
            predictions.
        """
        requires_backends(self, "torch")

        predicted_depth = outputs.predicted_depth

        if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
            )

        results = []
        target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
        for depth, target_size in zip(predicted_depth, target_sizes):
            if target_size is not None:
                depth = torch.nn.functional.interpolate(
                    depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
                ).squeeze()

            results.append({"predicted_depth": depth})

        return results


__all__ = ["DPTImageProcessorFast"]
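

# A minimal usage sketch (illustrative; assumes the "Intel/dpt-large" checkpoint
# and a PIL `image` are available, and is not executed at import time):
#
#     from transformers import AutoImageProcessor, DPTForDepthEstimation
#
#     processor = AutoImageProcessor.from_pretrained("Intel/dpt-large", use_fast=True)
#     model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     results = processor.post_process_depth_estimation(outputs, target_sizes=[image.size[::-1]])
#     depth = results[0]["predicted_depth"]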