
    Z js                     ,   S SK r S SKJr  S SKJr  S SKJr  S SKJr  S SK	r
S SKJr  SSKJrJr  SS	KJrJrJr  SS
KJrJrJrJrJrJr  SSKJrJr  SSKJrJ r J!r!J"r"  \!" 5       (       a  SSKJ#r#  \ " 5       (       a  S SK$J%r&  \"RN                  " \(5      r)SS/r* " S S\5      r+SS1S1SS1S1SS14r,S r-    S%S\.\\.   -  S-  S\.S-  S\/S\/S\0\1\.4   4
S jjr2     S&S\.\\.   -  \0\1\.4   -  \-  S-  S\.S-  S\/S\/S\04
S  jjr3S!\4S"\5S\44S# jr6S$ r7g)'    N)Iterable)deepcopy)partial)Any)validate_typed_dict   )BatchFeatureImageProcessingMixin)center_crop	normalizerescale)ChannelDimension
ImageInputSizeDictget_image_sizemake_flat_list_of_imagesvalidate_preprocess_arguments)ImagesKwargsUnpack)auto_docstringis_torchvision_availableis_vision_availablelogging)PILImageResampling)
functionalprocessor_classimage_processor_typec                     ^  \ rS rSrSr\rSrSrS/r	S\
\   4U 4S jjrS rS	\S\
\   S
\4S jrS rS r S0S	\S\S
\4S jjrSS.S	\S\S\
\   S
\\   4S jjrS	\S\
\   S
\4S jr      S1S\\\   -  \\\4   -  \-  S-  S\\\   -  \\\4   -  \-  S-  S\\\   -  \\\4   -  \-  S-  S\S-  S\\\   -  S-  S\\\   -  S-  S
\4S jjr\r          S2S\S-  S\S-  S\S-  S\\ \   -  S-  S\\ \   -  S-  S\S-  S\S-  S \S-  S\S-  S!S"4S# jjr!\"S	\S\
\   S
\4S$ j5       r#S
\\\4   4U 4S% jjr$  S3S&\%RL                  S'\S(\\'-  S-  S)\\'-  S-  S
\%RL                  4
S* jjr(  S3S&\%RL                  S+\\\   -  S,\\\   -  S(\\'-  S-  S)\\'-  S-  S
\%RL                  4S- jjr)  S3S&\%RL                  S\\\4   S(\\'-  S-  S)\\'-  S-  S
\%RL                  4
S. jjr*S/r+U =r,$ )4BaseImageProcessor<   u  
Base class for image processors with an inheritance-based backend architecture.

This class defines the preprocessing pipeline: kwargs validation, input preparation, and dispatching to the
backend's `_preprocess` method. Backend subclasses (`TorchvisionBackend`, `PilBackend`) inherit from this class
and implement the actual image operations (resize, crop, rescale, normalize, etc.). Model-specific image
processors then inherit from the appropriate backend class.

Architecture Overview
---------------------

The class hierarchy is:

    BaseImageProcessor (this class)
    ├── TorchvisionBackend    (GPU-accelerated, torch.Tensor)
    │   └── ModelImageProcessor (e.g. LlavaNextImageProcessor)
    └── PilBackend            (portable CPU, np.ndarray)
        └── ModelImageProcessorPil (e.g. CLIPImageProcessorPil)

The preprocessing flow is:

    __call__() → preprocess() → _preprocess_image_like_inputs() → _prepare_image_like_inputs()
                                                                   (calls process_image per image)
                                                                 → _preprocess()
                                                                   (batch operations: resize, crop, etc.)

- `process_image`: Implemented by backends. Converts a single raw input (PIL, NumPy, or Tensor) to the
  backend's working format (torch.Tensor or np.ndarray), handles RGB conversion and channel reordering.
- `_preprocess`: Implemented by backends. Performs the actual batch processing (resize, center crop, rescale,
  normalize, pad) and returns a `BatchFeature`.

Basic Implementation
--------------------

For processors that only need standard operations (resize, center crop, rescale, normalize), inherit from
a backend and define class attributes:

    from transformers.image_processing_backends import PilBackend

    class MyImageProcessorPil(PilBackend):
        resample = PILImageResampling.BILINEAR
        image_mean = IMAGENET_DEFAULT_MEAN
        image_std = IMAGENET_DEFAULT_STD
        size = {"height": 224, "width": 224}
        do_resize = True
        do_rescale = True
        do_normalize = True

The backend's `_preprocess` method handles the standard pipeline automatically.
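
Once defined, such a processor is used like any other image processor. A minimal usage sketch
(assuming the hypothetical `MyImageProcessorPil` above and a PIL image named `image`):

    processor = MyImageProcessorPil()
    batch = processor(images=image)          # __call__() dispatches to preprocess()
    pixel_values = batch["pixel_values"]

    # Per-call arguments take precedence over the class-attribute defaults
    batch = processor(images=image, size={"height": 448, "width": 448}, do_normalize=False)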

Custom Processing
-----------------

For processors that need custom logic (e.g., patch-based processing, multiple input types), override
`_preprocess` in your model-specific processor. The `_preprocess` method receives already-prepared images
(converted to the backend format with channels-first ordering) and performs the actual processing:

    class MyImageProcessor(TorchvisionBackend):
        def _preprocess(self, images, do_resize, size, do_normalize, image_mean, image_std, **kwargs):
            # Group images by shape for efficient batched operations
            grouped_images, grouped_images_index = group_images_by_shape(images)
            processed_groups = {}
            for shape, stacked_images in grouped_images.items():
                if do_resize:
                    stacked_images = self.resize(stacked_images, size=size)
                if do_normalize:
                    stacked_images = self.normalize(stacked_images, mean=image_mean, std=image_std)
                processed_groups[shape] = stacked_images
            processed_images = reorder_images(processed_groups, grouped_images_index)
            return BatchFeature(data={"pixel_values": processed_images})

For processors handling multiple input types (e.g., images + segmentation maps), override
`_preprocess_image_like_inputs`:

    def _preprocess_image_like_inputs(
        self,
        images: ImageInput,
        segmentation_maps: ImageInput | None = None,
        **kwargs,
    ) -> BatchFeature:
        images = self._prepare_image_like_inputs(images, **kwargs)
        batch_feature = self._preprocess(images, **kwargs)

        if segmentation_maps is not None:
            maps = self._prepare_image_like_inputs(segmentation_maps, **kwargs)
            batch_feature["labels"] = self._preprocess(maps, **kwargs).pixel_values

        return batch_feature

Extending Backend Behavior
--------------------------

To customize operations for a specific backend, subclass the backend and override its methods:

    from transformers.image_processing_backends import TorchvisionBackend, PilBackend

    class MyTorchvisionProcessor(TorchvisionBackend):
        def resize(self, image, size, **kwargs):
            # Custom resize logic for torchvision
            return super().resize(image, size, **kwargs)

    class MyPilProcessor(PilBackend):
        def resize(self, image, size, **kwargs):
            # Custom resize logic for PIL
            return super().resize(image, size, **kwargs)

Custom Parameters
-----------------

To add parameters beyond `ImagesKwargs`, create a custom kwargs class and set it as `valid_kwargs`:

    class MyImageProcessorKwargs(ImagesKwargs):
        custom_param: int | None = None

    class MyImageProcessor(TorchvisionBackend):
        valid_kwargs = MyImageProcessorKwargs
        custom_param = 10  # default value
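
Custom parameters follow the same flow as the built-in ones: values not passed at call time fall back
to the class attribute, and the resolved value reaches `_preprocess` through `**kwargs`. A minimal
sketch of a `_preprocess` consuming the hypothetical `custom_param`:

    class MyImageProcessor(TorchvisionBackend):
        valid_kwargs = MyImageProcessorKwargs
        custom_param = 10

        def _preprocess(self, images, custom_param, **kwargs):
            # custom_param is 10 unless overridden, e.g. processor(images, custom_param=5)
            ...
            return BatchFeature(data={"pixel_values": images})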

Key Notes
---------

- Backend selection is done at the class level: inherit from `TorchvisionBackend` or `PilBackend`
- Backends receive images as `torch.Tensor` (Torchvision) or `np.ndarray` (PIL), always channels-first
- All images have channel dimension first during processing, regardless of backend
- Arguments not provided by users default to class attribute values
- Backend classes encapsulate backend-specific logic (resize, normalize, etc.) and can be overridden
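
Instances serialize through `ImageProcessingMixin` as usual, with `to_dict` filtering out values that
only restate class-level defaults. A short sketch (paths are illustrative):

    processor = MyImageProcessorPil(size={"height": 336, "width": 336})
    processor.save_pretrained("./my_processor")                 # writes preprocessor_config.json
    reloaded = MyImageProcessorPil.from_pretrained("./my_processor")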
Tgp?pixel_valueskwargsc                 &   > [         TU ]  " S0 UD6  g )N )super__init__)selfr"   	__class__s     t/root/GenerationalWealth/GenerationalWealth/venv/lib/python3.13/site-packages/transformers/image_processing_utils.pyr&   BaseImageProcessor.__init__   s    "6"    c                 x   0 nU R                   R                   H5  nUR                  US5      nUb  XBU'   M  [        [	        XS5      5      X#'   M7     U R
                  " S0 UD6nUR                  5        H  u  p5[        XU5        M     [        U R                   R                  R                  5       5      U l
        g)z^Resolve and set instance attributes from kwargs and class-level defaults for all valid kwargs.Nr$   )valid_kwargs__annotations__popr   getattr_standardize_kwargsitemssetattrlistkeys_valid_kwargs_names)r'   r"   
attributeskeykwargvalues         r)   _set_attributes"BaseImageProcessor._set_attributes   s    
$$44CJJsD)E "'3"*74d+C"D
 5 --;
;
$**,JCDu% - $((9(9(I(I(N(N(P#Q r+   imagesreturnc                 .    U R                   " U/UQ70 UD6$ )z)Preprocess an image or a batch of images.)
preprocessr'   r=   argsr"   s       r)   __call__BaseImageProcessor.__call__   s    v7777r+   c                     [         e)a\  
Process a single raw image into the backend's working format.

Implemented by backend subclasses (`TorchvisionBackend`, `PilBackend`). Converts a raw input
(PIL Image, NumPy array, or torch Tensor) to the backend's internal format (`torch.Tensor` for
Torchvision, `np.ndarray` for PIL), handles RGB conversion and ensures channels-first ordering.
NotImplementedErrorr'   rB   r"   s      r)   process_image BaseImageProcessor.process_image   s
     "!r+   c                     [         e)a  
Perform the actual batch image preprocessing (resize, center crop, rescale, normalize, pad).

Implemented by backend subclasses (`TorchvisionBackend`, `PilBackend`). Receives a list of
already-prepared images (in the backend's format, channels-first) and applies the configured
preprocessing operations. Returns a `BatchFeature` with the processed pixel values.

Model-specific processors can override this method to implement custom preprocessing logic
(e.g., patch-based processing in LLaVA-NeXT).
rF   rH   s      r)   _preprocessBaseImageProcessor._preprocess   s
     "!r+      expected_ndimsc                 6    U R                  U5      n[        XS9$ )z
Prepare the images structure for processing.

Args:
    images (`ImageInput`):
        The input images to process.

Returns:
    `ImageInput`: The images with a valid nesting.
rO   )fetch_imagesr   )r'   r=   rO   s      r)   _prepare_images_structure,BaseImageProcessor._prepare_images_structure   s     ""6*'NNr+   rQ   c          
      f   U R                  XS9n[        U R                  /UQ70 UD6n[        U5      S:  =(       a    [	        US   [
        [        -  5      nU(       a+  U VVs/ s H  ow Vs/ s H
  o" U5      PM     snPM     n	nnU	$ U Vs/ s H
  o" U5      PM     n	nU	$ s  snf s  snnf s  snf )az  
Prepare image-like inputs for processing by converting each image via `process_image`.

Flattens the input structure and applies `process_image` (implemented by the backend) to each
individual image, converting raw inputs (PIL, NumPy, Tensor) into the backend's working format
with channels-first ordering.

Args:
    images (`ImageInput`):
        The image-like inputs to process.
    expected_ndims (`int`, *optional*, defaults to 3):
        The expected number of dimensions for the images.

Returns:
    `list[torch.Tensor]` or `list[np.ndarray]`: The prepared images in the backend's format,
    with channels-first ordering.
rQ   r   )rS   r   rI   len
isinstancer4   tuple)
r'   r=   rO   rB   r"   process_image_partialhas_nested_structurenested_listimgprocessed_imagess
             r)   _prepare_image_like_inputs-BaseImageProcessor._prepare_image_like_inputs  s    0 ///V '(:(: LT LV L"6{QV:fQiPU3VgmngmXc{ S{!6s!;{ Sgmn   GMMfs 5c :fM	 !TnMs   $	B(-B#>B(B.#B(c                 T    U R                   " U40 UD6nU R                  " U/UQ70 UD6$ )a  
Preprocess image-like inputs by preparing them and dispatching to `_preprocess`.

This method first calls `_prepare_image_like_inputs` to convert raw inputs into the backend's
format, then calls `_preprocess` for the actual batch processing. Override this method in
model-specific processors that need to handle multiple image-like input types (e.g., images
and segmentation maps) or need custom orchestration of the preprocessing pipeline.
)r^   rL   rA   s       r)   _preprocess_image_like_inputs0BaseImageProcessor._preprocess_image_like_inputs)  s4     00B6B8888r+   Nsize	crop_sizepad_sizedefault_to_square
image_mean	image_stdc           	         Uc  0 nUb'  [        U[        5      (       d  [        S	0 [        XS9D6nUb(  [        U[        5      (       d  [        S	0 [        USS9D6nUb(  [        U[        5      (       d  [        S	0 [        USS9D6n[        U[        5      (       a  [	        U5      n[        U[        5      (       a  [	        U5      nXS'   X'S'   X7S'   XWS'   XgS'   U$ )
z
Standardize kwargs to canonical format before validation.
Can be overridden by subclasses to customize the processing of kwargs.
)rc   rf   rd   )
param_namere   )rc   rj   rc   rg   rh   r$   )rW   r   get_size_dictr4   rX   )r'   rc   rd   re   rf   rg   rh   r"   s           r)   r1   &BaseImageProcessor._standardize_kwargs:  s     >FJtX$>$>\m[\D Ix)H)H T={#STI
8X(F(FV-X*"UVHj$''z*Ji&&i(Iv'{%z)|'{r+   
do_rescalerescale_factordo_normalize	do_resizedo_center_cropresamplez7PILImageResampling | tvF.InterpolationMode | int | Nonec                 (    [        UUUUUUU	UUU
S9
  g)z0
Validate the kwargs for the preprocess method.
)
rm   rn   ro   rg   rh   rq   rd   rp   rc   rr   N)r   )r'   rm   rn   ro   rg   rh   rp   rc   rq   rd   rr   r"   s               r)   _validate_preprocess_kwargs.BaseImageProcessor._validate_preprocess_kwargs`  s*    " 	&!)%!)	
r+   c           	          [        U R                  U5        U R                   H  nUR                  U[	        XS5      5        M!     U R
                  " S0 UD6nU R                  " S0 UD6  U R                  " U/UQ70 UD6$ )z+
Preprocess an image or a batch of images.
Nr$   )r   r-   r6   
setdefaultr0   r1   rt   ra   )r'   r=   rB   r"   
kwarg_names        r)   r@   BaseImageProcessor.preprocess~  s     	D--v6 22Jj'$D*IJ 3 ))3F3 	((26211&J4J6JJr+   c                 B  > [         TU ]  5       n0 nUR                  5        HU  u  p4[        U[        5      (       a  [        U5      nUc)  [        [        U 5      US5      nUS:w  a  Ub  XBU'   MM  MO  MQ  XBU'   MW     UR                  SS 5        UR                  SS 5        U$ )N	NOT_FOUND_valid_processor_keysr6   )	r%   to_dictr2   rW   r   dictr0   typer/   )r'   processor_dictfiltered_dictr8   r:   class_defaultr(   s         r)   r}   BaseImageProcessor.to_dict  s    * (..0JC%**U} 'T
C E K/M4M).#& 5N/ &+c" 1 	148/6r+   imagescaledata_formatinput_data_formatc                      [        U4X#US.UD6$ )a>  
Rescale an image by a scale factor. image = image * scale.

Args:
    image (`np.ndarray`):
        Image to rescale.
    scale (`float`):
        The scaling factor to rescale pixel values by.
    data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format for the output image. If unset, the channel dimension format of the input
        image is used. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
    input_data_format (`ChannelDimension` or `str`, *optional*):
        The channel dimension format for the input image. If unset, the channel dimension format is inferred
        from the input image. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

Returns:
    `np.ndarray`: The rescaled image.
)r   r   r   )r   )r'   r   r   r   r   r"   s         r)   r   BaseImageProcessor.rescale  s    < urEVgrkqrrr+   meanstdc                      [        U4X#XES.UD6$ )a  
Normalize an image. image = (image - image_mean) / image_std.

Args:
    image (`np.ndarray`):
        Image to normalize.
    mean (`float` or `Iterable[float]`):
        Image mean to use for normalization.
    std (`float` or `Iterable[float]`):
        Image standard deviation to use for normalization.
    data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format for the output image. If unset, the channel dimension format of the input
        image is used. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
    input_data_format (`ChannelDimension` or `str`, *optional*):
        The channel dimension format for the input image. If unset, the channel dimension format is inferred
        from the input image. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

Returns:
    `np.ndarray`: The normalized image.
)r   r   r   r   )r   )r'   r   r   r   r   r   r"   s          r)   r   BaseImageProcessor.normalize  s'    B 
;
gm
 	
r+   c                     [        U5      nSU;  d  SU;  a  [        SUR                  5        35      e[        U4US   US   4UUS.UD6$ )ay  
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.

Args:
    image (`np.ndarray`):
        Image to center crop.
    size (`dict[str, int]`):
        Size of the output image.
    data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format for the output image. If unset, the channel dimension format of the input
        image is used. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
    input_data_format (`ChannelDimension` or `str`, *optional*):
        The channel dimension format for the input image. If unset, the channel dimension format is inferred
        from the input image. Can be one of:
        - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
        - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
heightwidthz=The size dictionary must have keys 'height' and 'width'. Got )rc   r   r   )rk   
ValueErrorr5   r   )r'   r   rc   r   r   r"   s         r)   r   BaseImageProcessor.center_crop  sp    8 T"47$#6\]a]f]f]h\ijkk
x.$w-0#/	

 
 	
r+   )r6   )rN   )NNNNNN)
NNNNNNNNNN)NN)-__name__
__module____qualname____firstlineno____doc__r   r-   rf   rn   model_input_namesr   r&   r;   r   r	   rC   rI   rL   intrS   r4   r   r^   ra   r   r~   strr   boolfloatr1   _further_process_kwargsrX   rt   r   r@   r}   npndarrayr   r   r   r   __static_attributes____classcell__)r(   s   @r)   r   r   <   s"   ~@  LN'(#!5 #
R8z 8F<<P 8Ua 8""   OO O 
	O,  	# #  	# 
 &#  
c# J99 &	9
 
9& HLLPKO)-1504!HSM!DcN2X=D! #&c3h7(BTI! %S#X6ADH	!
  $;! DK'$.! 4;&-! 
!H 2 #''+$(2615!% $&*%)NR
4K
 
 Tk	

 E%L(4/
 5<'$.
 $;
 o
 t
 d?
 L
< K Kf\>R KWc K K&c3h 0 6:;?szzs s ++d2	s
 !11D8s 
sL 6:;?#
zz#
 huo%#
 Xe_$	#

 ++d2#
 !11D8#
 
#
R 6:;?%
zz%
 38n%
 ++d2	%

 !11D8%
 
%
 %
r+   r   r   r   shortest_edgelongest_edge
max_height	max_widthc                     [        U [        5      (       d  g[        U R                  5       5      n[         H
  nX:X  d  M
    g   g)NFT)rW   r~   setr5   VALID_SIZE_DICT_KEYS)	size_dictsize_dict_keysallowed_keyss      r)   is_valid_size_dictr     s;    i&&)*N,) - r+   rc   max_sizerf   height_width_orderr>   c                    [        U [        5      (       a  U(       a  Ub  [        S5      eX S.$ [        U [        5      (       a  U(       d  SU 0nUb  XS'   U$ [        U [        [        45      (       a  U(       a  U S   U S   S.$ [        U [        [        45      (       a  U(       d  U S   U S   S.$ U c  Ub  U(       a  [        S5      eSU0$ [        SU  35      e)	NzLCannot specify both size as an int, with default_to_square=True and max_size)r   r   r   r   r   r   z7Cannot specify both default_to_square=True and max_sizez+Could not convert size input to size dict: )rW   r   r   rX   r4   )rc   r   rf   r   r   s        r)   convert_to_size_dictr   (  s     $!2kll.. 
D#		'8$d+	(0n%	D5$-	(	(-?q'DG44	D5$-	(	(1Cq'DG44	(.VWW))
B4&I
JJr+   c           
      R   [        U [        [        -  5      (       d2  [        XX25      n[        R                  U S[         SU  SU S35        O#[        U [        5      (       a  [        U 5      nOU n[        U5      (       d%  [        U S[         SUR                  5        35      eU$ )aT  
Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards
compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,
width) or (width, height) format.

- If `size` is tuple, it is converted to `{"height": size[0], "width": size[1]}` or `{"height": size[1], "width":
size[0]}` if `height_width_order` is `False`.
- If `size` is an int, and `default_to_square` is `True`, it is converted to `{"height": size, "width": size}`.
- If `size` is an int and `default_to_square` is False, it is converted to `{"shortest_edge": size}`. If `max_size`
  is set, it is added to the dict as `{"longest_edge": max_size}`.
- If `size` is `None` and `default_to_square` is False, the result is `{"longest_edge": max_size}` (requires
  `max_size` to be set). Tuple/list/SizeDict/dict `size` values do not use `max_size`.

Args:
    size (`int | Iterable[int] | dict[str, int] | SizeDict`, *optional*):
        The `size` parameter to be cast into a size dictionary.
    max_size (`int | None`, *optional*):
        With `default_to_square=False`, sets `longest_edge` when `size` is an int or `None`; unused for dict,
        `SizeDict`, or tuple/list `size`. Raises if set with `default_to_square=True` when `size` is an int or `None`.
    height_width_order (`bool`, *optional*, defaults to `True`):
        If `size` is a tuple, whether it's in (height, width) or (width, height) order.
    default_to_square (`bool`, *optional*, defaults to `True`):
        If `size` is an int, whether to default to a square image or not.
z@ should be a dictionary with one of the following sets of keys: z, got z. Converted to .z- must have one of the following set of keys: )
rW   r~   r   r   loggerinfor   r   r   r5   )rc   r   r   rf   rj   r   s         r)   rk   rk   G  s    > dD8O,,(9J_	lZ[oZppvw{v| }&Kq*	

 
D(	#	#J		i((lGH\G]]cdmdrdrdtcuv
 	
 r+   original_sizepossible_resolutionsc                     U u  p#SnSn[        S5      nU H_  u  px[        X-  Xr-  5      n	[        X9-  5      [        X)-  5      p[        X-  X2-  5      nX-  U-
  nX:  d  X:X  d  MQ  X:  d  MX  UnUnXx4nMa     U$ )ad  
Selects the best resolution from a list of possible resolutions based on the original size.

This is done by calculating the effective and wasted resolution for each possible resolution.

The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.

Args:
    original_size (tuple):
        The original size of the image in the format (height, width).
    possible_resolutions (list):
        A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].

Returns:
    tuple: The best fit resolution in the format (height, width).
Nr   inf)r   minr   )r   r   original_heightoriginal_widthbest_fitmax_effective_resolutionmin_wasted_resolutionr   r   r   downscaled_widthdownscaled_heighteffective_resolutionwasted_resolutions                 r)   select_best_resolutionr   y  s    " '4#OH !%L-E*F,DE.1.2H.I3OfKg+"#3#GIij"^/CC: <ARAj';$$5!H . Or+   c                     [        XS9u  p4Uu  pVXd-  nXS-  nXx:  a'  Un	[        [        R                  " X7-  5      U5      n
X4$ Un
[        [        R                  " XH-  5      U5      n	X4$ )zm
Given an image and a target resolution, calculate the output size of the image after cropping to the target
)channel_dim)r   r   mathceil)r   target_resolutionr   r   r   target_heighttarget_widthscale_wscale_h	new_width
new_heights              r)   get_patch_output_sizer     s     '5U&Z#O"3M+G-G 	?#<=}M

    #
		.":;\J	  r+   )NNTT)NNTTrc   )8r   collections.abcr   copyr   	functoolsr   typingr   numpyr   huggingface_hub.dataclassesr   image_processing_baser	   r
   image_transformsr   r   r   image_utilsr   r   r   r   r   r   processing_utilsr   r   utilsr   r   r   r   r   torchvision.transforms.v2r   tvF
get_loggerr   r   INIT_SERVICE_KWARGSr   r   r   r   r   r~   r   r   rk   rX   r4   r   r   r$   r+   r)   <module>r      s    $     ; E = =  3  / ; 
		H	%  U
- U
r wn%;  (,"#	K


$KDjK K 	K
 
#s(^K@ DH#"/

S#X
.
9D
@/Dj/ / 	/ 
/d#% #t #PU #L!r+   