
    rh\                        d Z ddlZddlmZmZ ddlZddlmZm	Z	 ddl
mZmZmZ ddlmZmZmZmZmZmZmZmZmZmZmZmZmZ ddlmZmZ dd	lm Z   ejB                  e"      Z#	 	 	 	 dd
e$de$de$de$de$de$de$fdZ% G d de      Z&dgZ'y)z#Image processor class for GLM-4.1V.    N)OptionalUnion   )BaseImageProcessorBatchFeature)convert_to_rgbresizeto_channel_dimension_format)OPENAI_CLIP_MEANOPENAI_CLIP_STDChannelDimension
ImageInputPILImageResamplingget_image_sizeinfer_channel_dimension_formatis_scaled_imagemake_flat_list_of_imagesmake_list_of_imagesto_numpy_arrayvalid_imagesvalidate_preprocess_arguments)
TensorTypelogging)
VideoInput
num_framesheightwidthtemporal_factorfactor
min_pixels
max_pixelsc                    | |k  rt        d|  d|       ||k  s||k  rt        d| d| d|       t        ||      t        ||      z  dkD  r%t        dt        ||      t        ||      z         t        ||z        |z  }t        ||z        |z  }t        | |z        |z  }	|	|z  |z  |kD  r^t	        j
                  | |z  |z  |z        }
t	        j                  ||
z  |z        |z  }t	        j                  ||
z  |z        |z  }||fS |	|z  |z  |k  rZt	        j
                  || |z  |z  z        }
t	        j                  ||
z  |z        |z  }t	        j                  ||
z  |z        |z  }||fS )Nzt:z% must be larger than temporal_factor:zheight:z
 or width:z must be larger than factor:   z4absolute aspect ratio must be smaller than 200, got )
ValueErrormaxminroundmathsqrtfloorceil)r   r   r   r   r   r    r!   h_barw_bart_barbetas              /var/www/html/ai-insurance-compliance-backend/venv/lib/python3.12/site-packages/transformers/models/glm4v/image_processing_glm4v.pysmart_resizer1   2   s    O#2j\)NN_`aa%&.76(*UG;WX^W_`aa	VU	c&%0	03	6B3vuCUX[\bdiXjCjBkl
 	
 &6/"V+E%&.!F*E*.//AEu}uz)yy*v-5CD

6D=612V;

54<&01F: %< 
		+yyzF':U'BCD		&4-&01F:		%$,/069%<    c            &           e Zd ZdZddgZddej                  dddddddddfd	ed
ee	e
ef      dededeeef   dedeeeee   f      deeeee   f      dededededdf fdZddddddddddddej"                  dfdeeef   d	ee   d
ee	e
ef      dedee   dee   dee   deeeee   f      deeeee   f      dee   dee   dee   dee   dee   deee
ef      fdZddddddddddddddej"                  dfdeded	ee   d
ee	e
ef      dedee   dee   dee   deeeee   f      deeeee   f      dee   dee   dee   dee   deee
ef      dee   deee
ef      f"dZd!dedefd Z xZS )"Glm4vImageProcessora
  
    Constructs a GLM-4V image processor that dynamically resizes images based on the original images.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions.
        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 15000}`):
            Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
            in the `preprocess` method. Available options are:
                - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
                    Do NOT keep the aspect ratio.
                - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
                    the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
                    less or equal to `longest_edge`.
                - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
                    aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
                    `max_width`.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image.
        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        patch_size (`int`, *optional*, defaults to 14):
            The spatial patch size of the vision encoder.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            The temporal patch size of the vision encoder.
        merge_size (`int`, *optional*, defaults to 2):
            The merge size of the vision encoder to llm encoder.
    pixel_valuesimage_grid_thwTNgp?      	do_resizesizeresample
do_rescalerescale_factordo_normalize
image_mean	image_stddo_convert_rgb
patch_sizetemporal_patch_size
merge_sizereturnc                     t        |   di | |d|vsd|vrt        d      ddd}|| _        || _        || _        || _        || _        || _        ||nt        | _
        ||nt        | _        |
| _        || _        || _        |	| _        y )Nshortest_edgelongest_edge:size must contain 'shortest_edge' and 'longest_edge' keys. 1  q rG   rH    )super__init__r$   r:   r9   r;   r<   r=   r>   r   r?   r   r@   rB   rC   rD   rA   )selfr9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   kwargs	__class__s                 r0   rO   zGlm4vImageProcessor.__init__}   s      	"6"!<VZ@ZYZZ%.PD	" $,((2(>*DT&/&;$#6 $,r2   imagesdata_formatinput_data_formatc                    t        |      }|r|D cg c]  }t        |       }}|D cg c]  }t        |       }}|r#t        |d         rt        j                  d       |t        |d         }t        |d   |      \  }}||}}g }|D ]w  }|r't        |||||
|z        \  }}t        |||f||      }|r| j                  |||      }|r| j                  |||	|      }t        |||      }|j                  |       y t        j                  |      }|t         j"                  k(  r|j%                  dd	d
d      }|j&                  d   |z  dk7  rXt        j(                  |d   t        j*                     ||j&                  d   |z  z
  d      }t        j,                  ||gd      }|j&                  d
   }|j&                  d   |z  }||
z  ||
z  }}|j/                  |||||z  ||
||z  ||
	      }|j%                  dd	ddddd
dd	      }|j/                  ||z  |z  ||z  |
z  |
z        }||||ffS c c}w c c}w )a~  
        Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
            vision_info (`List[Dict]`, *optional*):
                Optional list of dictionaries containing additional information about vision inputs.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Scale factor to use if rescaling the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
                The temporal patch size of the vision encoder.
            merge_size (`int`, *optional*, defaults to `self.merge_size`):
                The merge size of the vision encoder to llm encoder.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.   - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        r   zIt looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.)channel_dim)r   r   r   r   r   )r:   r;   rU   )scalerU   )imagemeanstdrU   )input_channel_dimr      r8   )axis               )r   r   r   r   loggerwarning_oncer   r   r1   r	   rescale	normalizer
   appendnparrayr   LAST	transposeshaperepeatnewaxisconcatenatereshape)rP   rS   r9   r:   r;   r<   r=   r>   r?   r@   rB   rC   rD   rA   rT   rU   rY   r   r   resized_heightresized_widthprocessed_imagespatchesrepeatschannelgrid_tgrid_hgrid_wflatten_patchess                                r0   _preprocesszGlm4vImageProcessor._preprocess   s   z %V,9?@nU+@F@ 6<<E.'<</&)4s $ >vay I&vay>OP(. 	+E0<2!$7%
21-  ?(fw U.TefjiSd '  0{VghE##E*-	+0 ((+,*///''1a3G==11Q6iiBJJ')<a@PSf@f)gnoG nngw%7a@G--"q!%88':5}
7R//j j 

 ##Aq!Q1aA>!//VOf$g0C&Cj&PS]&]
  888I A =s
   H<Ivideosreturn_tensorsc                 B   |d|vsd|vrt        d      ddd}||n| j                  }||n| j                  }||n| j                  }||n| j                  }||n| j
                  }|	|	n| j                  }	|
|
n| j                  }
||n| j                  }||n| j                  }||n| j                  }||n| j                  }|t        |      }|t        |      st        d      t        |||	|
|||       i }|g g }}|D ]G  }| j                  ||||||||	|
||||||	      \  }}|j!                  |       |j#                  |       I t%        j&                  |      }t%        j&                  |      }|j)                  ||d
       t+        ||      S )a  
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            videos (`VideoInput`):
                Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
                passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
                The max pixels of the image to resize the image.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
                The temporal patch size of the vision encoder.
            merge_size (`int`, *optional*, defaults to `self.merge_size`):
                The merge size of the vision encoder to llm encoder.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        rG   rH   rI   rJ   rK   rL   zkInvalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.)r=   r>   r?   r@   r9   r:   r;   )r9   r:   r;   r<   r=   r>   r?   r@   rB   rC   rD   rT   rA   rU   )r5   r6   )datatensor_type)r$   r9   r;   r<   r=   r>   r?   r@   rB   rC   rD   rA   r   r   r   r}   extendri   rj   rk   updater   )rP   rS   r~   r9   r:   r;   r<   r=   r>   r?   r@   rB   rC   rD   rA   r   rT   rU   r   r5   vision_grid_thwsrY   rv   r6   s                           r0   
preprocesszGlm4vImageProcessor.preprocess'  s   Z !<VZ@ZYZZ%.PD!*!6IDNN	'38#-#9Zt
+9+E4K^K^'3'?|TEVEV#-#9Zt
!*!6IDNN	#-#9Zt
5H5T1Z^ZrZr#-#9Zt
+9+E4K^K^-f5Fl6&:: 
 	&)%!	
 -/*L 8*.*:*:'%)#1!-)')(;) +#1&7 +; +'" ##G, ''7'8( 88L1L!xx(89KKIYZ[>BBr2   r   r   c                     |j                  d| j                        }|j                  d| j                        }||z  }t        | j                  |||| j                        \  }}||z  ||z  }
}	|	|
z  S )a  
        A utility that returns number of image patches for a given image size.

        Args:
            height (`int`):
                Height of the input image.
            width (`int`):
                Width of the input image.
            images_kwargs (`dict`, *optional*)
                Any kwargs to override defaults of the image processor.
        Returns:
            `int`: Number of image patches per image.
        rB   rD   )r   r   r   r   r   )getrB   rD   r1   rC   )rP   r   r   images_kwargsrB   rD   r   rs   rt   rz   r{   s              r0   get_number_of_image_patchesz/Glm4vImageProcessor.get_number_of_image_patches  s     #&&|T__E
"&&|T__E
j((4// 44)
% (:5}
7Rr2   )N)__name__
__module____qualname____doc__model_input_namesr   BICUBICboolr   dictstrintr   floatlistrO   r   FIRSTr   r   r}   r   r   r   __classcell__)rR   s   @r0   r4   r4   S   s   %N ()9: )-'9'A'A,3!:>9=##$"-"- tCH~&"- %	"-
 "- c5j)"- "- U5$u+#567"- E%e"456"- "- "- !"- "- 
"-N %))-'+%)*.'+:>9=$(-1$()-2B2H2HDH!D9j*,-D9 D>D9 tCH~&	D9
 %D9 TND9 !D9 tnD9 U5$u+#567D9 E%e"456D9 SMD9 &c]D9 SMD9 !D9 ./D9  $E#/?*?$@A!D9R "$()-'+%)*.'+:>9=$(-1$()-;?2B2H2HDH%MCMC MC D>	MC
 tCH~&MC %MC TNMC !MC tnMC U5$u+#567MC E%e"456MC SMMC &c]MC SMMC !MC  !sJ!78!MC" ./#MC$ $E#/?*?$@A%MC^# c r2   r4   )r8      rJ   i   )(r   r(   typingr   r   numpyrj   image_processing_utilsr   r   image_transformsr   r	   r
   image_utilsr   r   r   r   r   r   r   r   r   r   r   r   r   utilsr   r   video_utilsr   
get_loggerr   re   r   r1   r4   __all__rM   r2   r0   <module>r      s    *  "  F 
    ) % 
		H	% 0  	
   B}, }@ !
!r2   