"""Image processor class for GLPN."""

from typing import TYPE_CHECKING

from ...image_processing_backends import TorchvisionBackend
from ...image_processing_utils import BatchFeature
from ...image_transforms import group_images_by_shape, reorder_images
from ...image_utils import ImageInput, PILImageResampling, SizeDict
from ...processing_utils import ImagesKwargs, Unpack
from ...utils import TensorType, auto_docstring, logging, requires_backends


if TYPE_CHECKING:
    from ...modeling_outputs import DepthEstimatorOutput

import torch
from torchvision.transforms.v2 import functional as tvF


logger = logging.get_logger(__name__)


class GLPNImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    size_divisor (`int`, *optional*, defaults to 32):
        When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
        multiple of `size_divisor`.
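        For example, with the default `size_divisor=32`, a 513 x 767 image is resized to 512 x 736
        (`513 // 32 * 32 == 512` and `767 // 32 * 32 == 736`).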
size_divisor N)__name__
__module____qualname____firstlineno____doc__int__annotations____static_attributes__r       /root/GenerationalWealth/GenerationalWealth/venv/lib/python3.13/site-packages/transformers/models/glpn/image_processing_glpn.pyr   r   (   s     r!   r   F)totalc            #         ^  \ rS rSrSr\rSrSrSr	\
R                  rSrS\\   4U 4S jjrU 4S jr\S	\S\\   S
\4U 4S jj5       r S&SSS\SSS\S
S4
U 4S jjjr S&S	\S   S\S\SSS\S\S\S\S\S\\\   -  S-  S\\\   -  S-  S\S-  S\S-  S\S-  S\\-  S-  S\S
\4"S  jjr S'S!S"S#\\\\\4      -  S-  S
\\\\4      4S$ jjr S%r!U =r"$ )(GLPNImageProcessor2   z6Torchvision backend for GLPN with size_divisor resize.Tgp?    kwargsc                 &   > [         TU ]  " S0 UD6  g )Nr   )super__init__selfr(   	__class__s     r"   r+   GLPNImageProcessor.__init__>   s    "6"r!   c                 H   > UR                  SS 5        [        TU ]  " S0 UD6$ )N	do_resizer   )popr*   _validate_preprocess_kwargsr,   s     r"   r3   .GLPNImageProcessor._validate_preprocess_kwargsA   s$    

;%w2<V<<r!   imagesreturnc                 &   > [         TU ]  " U40 UD6$ N)r*   
preprocess)r-   r5   r(   r.   s      r"   r9   GLPNImageProcessor.preprocessF   s    w!&3F33r!   imageztorch.Tensorsizeresamplez7PILImageResampling | tvF.InterpolationMode | int | Noner   c                 x   > UR                   SS u  pgXd-  U-  nXt-  U-  n	[        T
U ]  " U[        XS94SU0UD6$ )zTResize so height and width are rounded down to the closest multiple of size_divisor.N)heightwidthr=   )shaper*   resizer   )r-   r;   r<   r=   r   r(   r@   rA   new_hnew_wr.   s             r"   rC   GLPNImageProcessor.resizeJ   sa     BC(&5%4w~E/
 
 	
 	
r!   r1   do_center_crop	crop_size
do_rescalerescale_factordo_normalize
image_meanN	image_stddo_padpad_sizedisable_groupingreturn_tensorsc           	          [        XS9u  nn0 nUR                  5        H7  u  nnU(       a  U R                  UX4US9nU R                  UXxXU5      nUUU'   M9     [	        UU5      n[        SU0US9$ )zCustom preprocessing for GLPN.)rP   )r   pixel_values)datatensor_type)r   itemsrC   rescale_and_normalizer   r   )r-   r5   r1   r<   r=   rG   rH   rI   rJ   rK   rL   rM   rN   rO   rP   rQ   r   r(   grouped_imagesgrouped_images_indexprocessed_images_groupedrB   stacked_imagesprocessed_imagess                           r"   _preprocessGLPNImageProcessor._preprocess]   s    * 0EV/o,,#% %3%9%9%;!E>!%^TZf!g!77
LV_N /=$U+ &< **BDXY.2B!CQ_``r!   outputsr   target_sizesc                    [        U S5        UR                  nUb#  [        U5      [        U5      :w  a  [        S5      e/ nUc  S/[        U5      -  OUn[	        X25       Hi  u  pVUbN  US   n[
        R                  R                  R                  XVSSS9nUR                  S5      R                  S5      nUR                  S	U05        Mk     U$ )
zN
Convert raw model outputs to final depth predictions.
Only supports PyTorch.
torchNz]Make sure that you pass in as many target sizes as the batch dimension of the predicted depth)NN.bicubicF)r<   modealign_cornersr   predicted_depth)r   rf   len
ValueErrorziprb   nnr   interpolatesqueezeappend)r-   r_   r`   rf   resultsdepthtarget_sizes          r"   post_process_depth_estimation0GLPNImageProcessor.post_process_depth_estimation~   s     	$(!11#O(<L@Q(Qo  8D8LvO 44R^"%o"DE&o.++77V_ot7ua(003NN-u56 #E r!   r   )r'   r8   )#r   r   r   r   r   r   valid_kwargsr1   rI   rJ   r
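

# Usage sketch (assumptions: the `vinvino02/glpn-kitti` checkpoint and a PIL
# image `image`; any GLPN depth checkpoint works the same way):
#
#     from transformers import GLPNForDepthEstimation, GLPNImageProcessor
#
#     processor = GLPNImageProcessor()  # size_divisor=32, BILINEAR resampling
#     model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     results = processor.post_process_depth_estimation(outputs, target_sizes=[image.size[::-1]])
#     depth = results[0]["predicted_depth"]  # torch.Tensor at the original (height, width)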


__all__ = ["GLPNImageProcessor"]