
"""Image processor class for LLaVa."""

from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    convert_to_rgb,
    get_resize_output_image_size,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_kwargs,
    validate_preprocess_arguments,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class LlavaImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LLaVa image processor.

    Args:
        do_pad (`bool`, *optional*, defaults to `False`):
            Whether to pad the image to a square based on the longest edge.
            The padding value is determined by the `image_mean` parameter.
            Can be overridden by `do_pad` in the `preprocess` method.
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
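
    Example (an illustrative usage sketch, not an official snippet; the dummy `PIL.Image` below stands in for any
    supported image input):

    ```python
    >>> import PIL.Image
    >>> from transformers import LlavaImageProcessor

    >>> image_processor = LlavaImageProcessor(do_pad=True)
    >>> image = PIL.Image.new("RGB", (640, 480))  # non-square dummy image
    >>> batch = image_processor(images=image, return_tensors="np")  # `__call__` dispatches to `preprocess`
    >>> batch["pixel_values"].shape
    (1, 3, 224, 224)
    ```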
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_pad: bool = False,
        do_resize: bool = True,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_pad = do_pad
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
        # Keyword arguments accepted by `preprocess`; used to validate user-passed kwargs.
        self._valid_processor_keys = [
            "images",
            "do_pad",
            "do_resize",
            "size",
            "resample",
            "do_center_crop",
            "crop_size",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]

    def pad_to_square(
        self,
        image: np.ndarray,
        background_color: Union[int, tuple[int, int, int]] = 0,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Pads an image to a square based on the longest edge.

        Args:
            image (`np.ndarray`):
                The image to pad.
            background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
                The color to use for the padding. Can be an integer for single-channel images or a
                tuple of integers for multi-channel images. If passed as an integer for a
                multi-channel image, the remaining channels default to `0`.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                If unset, will use same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the input image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                If unset, will use the inferred format of the input image.
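
        Example (an illustrative sketch, not an official snippet; a tiny channels-first array keeps the geometry
        easy to follow):

        ```python
        >>> import numpy as np
        >>> from transformers import LlavaImageProcessor
        >>> from transformers.image_utils import ChannelDimension

        >>> image_processor = LlavaImageProcessor()
        >>> image = np.ones((3, 2, 4), dtype=np.uint8)  # 3 channels, height 2, width 4
        >>> padded = image_processor.pad_to_square(image, input_data_format=ChannelDimension.FIRST)
        >>> padded.shape  # the short (height) axis is padded up to the longest edge, with the input centered
        (3, 4, 4)
        ```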

        Returns:
            `np.ndarray`: The padded image.
        """
        height, width = get_image_size(image, input_data_format)
        num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1]

        if height == width:
            image = (
                to_channel_dimension_format(image, data_format, input_data_format)
                if data_format is not None
                else image
            )
            return image

        max_dim = max(height, width)

        # Ensure background_color has one entry per channel.
        if isinstance(background_color, int):
            background_color = [background_color]
        elif len(background_color) != num_channels:
            raise ValueError(
                f"background_color must have no more than {num_channels} elements to match the number of channels"
            )

        if input_data_format == ChannelDimension.FIRST:
            result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype)
            for i, color in enumerate(background_color):
                result[i, :, :] = color
            if width > height:
                start = (max_dim - height) // 2
                result[:, start : start + height, :] = image
            else:
                start = (max_dim - width) // 2
                result[:, :, start : start + width] = image
        else:
            result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype)
            for i, color in enumerate(background_color):
                result[:, :, i] = color
            if width > height:
                start = (max_dim - height) // 2
                result[start : start + height, :, :] = image
            else:
                start = (max_dim - width) // 2
                result[:, start : start + width, :] = image

        result = (
            to_channel_dimension_format(result, data_format, input_data_format) if data_format is not None else result
        )
        return result

    def resize(
        self,
        image: np.ndarray,
        size: dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.
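
        Example (an illustrative sketch, not an official snippet; only the shorter side has a guaranteed size):

        ```python
        >>> import numpy as np
        >>> from transformers import LlavaImageProcessor

        >>> image_processor = LlavaImageProcessor()
        >>> image = np.zeros((480, 640, 3), dtype=np.uint8)  # height 480, width 640
        >>> resized = image_processor.resize(image, size={"shortest_edge": 224})
        >>> resized.shape[0]  # the shorter (height) side is resized to exactly 224
        224
        ```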

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        default_to_square = True
        if "shortest_edge" in size:
            size = size["shortest_edge"]
            default_to_square = False
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")

        output_size = get_resize_output_image_size(
            image,
            size=size,
            default_to_square=default_to_square,
            input_data_format=input_data_format,
        )
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def preprocess(
        self,
        images: ImageInput,
        do_pad: Optional[bool] = None,
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image to a square based on the longest edge.
                The padding value is determined by the `image_mean` parameter.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_pad = do_pad if do_pad is not None else self.do_pad
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        processed_images = []
        for image in images:
            if do_pad:
                # Pad with the per-channel mean (converted back to the 0-255 range) so the padded
                # border matches the normalization statistics.
                image = self.pad_to_square(
                    image=image,
                    background_color=tuple(int(x * 255) for x in self.image_mean),
                    input_data_format=input_data_format,
                )

            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )

            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            processed_images.append(image)

        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)


__all__ = ["LlavaImageProcessor"]
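

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the library API). It exercises padding and the full
    # preprocessing pipeline on a random NumPy image, so it needs neither torch nor a real picture. Run it with
    # `python -m transformers.models.llava.image_processing_llava`.
    import numpy as np

    processor = LlavaImageProcessor(do_pad=True)
    dummy = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # height 480, width 640, RGB
    batch = processor(images=dummy, return_tensors="np")
    print("pixel_values shape:", batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)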