"""Image processor class for Fuyu."""

import math
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import pad, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    is_valid_image,
    make_list_of_images,
    to_numpy_array,
    validate_preprocess_arguments,
)
from ...utils import (
    TensorType,
    filter_out_non_signature_kwargs,
    is_torch_available,
    is_torch_device,
    is_torch_dtype,
    logging,
    requires_backends,
)


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


def make_list_of_list_of_images(
    images: Union[list[list[ImageInput]], list[ImageInput], ImageInput],
) -> list[list[ImageInput]]:
    if is_valid_image(images):
        return [[images]]

    if isinstance(images, list) and all(isinstance(image, list) for image in images):
        return images

    if isinstance(images, list):
        return [make_list_of_images(image) for image in images]

    raise ValueError("images must be a list of list of images or a list of images or an image.")


class FuyuBatchFeature(BatchFeature):
    """
    BatchFeature class for Fuyu image processor and processor.

    The outputs dictionary from the processors contains a mix of tensors and lists of tensors.
    """

    def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
        """
        Convert the inner content to tensors.

        Args:
            tensor_type (`str` or [`~utils.TensorType`], *optional*):
                The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
                `None`, no modification is done.
        """
        if tensor_type is None:
            return self

        is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type)

        def _convert_tensor(elem):
            if is_tensor(elem):
                return elem
            return as_tensor(elem)

        def _safe_convert_tensor(elem):
            try:
                return _convert_tensor(elem)
            except:  # noqa E722
                if key == "overflowing_values":
                    raise ValueError("Unable to create tensor returning overflowing values of different lengths.")
                raise ValueError(
                    "Unable to create tensor, you should probably activate padding "
                    "with 'padding=True' to have batched tensors with the same length."
                )

        # Do the tensor conversion in batch
        for key, value in self.items():
            if isinstance(value, list) and isinstance(value[0], list):
                # list[list[Any]] -> list[list[Tensor]]
                self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value]
            elif isinstance(value, list):
                # list[Any] -> list[Tensor]
                self[key] = [_safe_convert_tensor(elem) for elem in value]
            else:
                # Any -> Tensor
                self[key] = _safe_convert_tensor(value)
        return self

    def to(self, *args, **kwargs) -> "BatchFeature":
        """
        Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
        different `dtypes` and sending the `BatchFeature` to a different `device`.

        Args:
            args (`Tuple`):
                Will be passed to the `to(...)` function of the tensors.
            kwargs (`Dict`, *optional*):
                Will be passed to the `to(...)` function of the tensors.

        Returns:
            [`BatchFeature`]: The same instance after modification.
        """
        requires_backends(self, ["torch"])
        import torch  # noqa

        new_data = {}
        device = kwargs.get("device")
        # Check if the args are a device or a dtype
        if device is None and len(args) > 0:
            # The device should always be the first argument
            arg = args[0]
            if is_torch_dtype(arg):
                # The first argument is a dtype
                pass
            elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
                device = arg
            else:
                # It is something else
                raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")

        def _to(elem):
            # Cast floating-point tensors to the requested dtype/device; only move the rest
            if torch.is_floating_point(elem):
                return elem.to(*args, **kwargs)
            if device is not None:
                return elem.to(device=device)
            return elem

        # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
        for k, v in self.items():
            if isinstance(v, list) and isinstance(v[0], list):
                # Data structure is a list of lists
                new_v = []
                for elems in v:
                    new_v.append([_to(elem) for elem in elems])
                new_data[k] = new_v
            elif isinstance(v, list):
                # Data structure is a list
                new_data[k] = [_to(elem) for elem in v]
            else:
                new_data[k] = _to(v)
        self.data = new_data
        return self


class FuyuImageProcessor(BaseImageProcessor):
    """
    This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should
    handle:

    - Processing Images:
        Taking a batch of images as input. If the images are variable-sized, it resizes them based on the desired patch
        dimensions. The image output is always img_h, img_w of (1080, 1920)

        Then, it patches up these images using the patchify_image function.

    - Creating Image Input IDs:
        For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For
        variable-sized images, each line of patches is terminated with a newline ID.

    - Image Patch Indices:
        For each image patch, the code maintains an index where these patches should be inserted in a token stream.


    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image to `size`.
        size (`dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`):
            Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image to `size`.
        padding_value (`float`, *optional*, defaults to 1.0):
            The value to pad the image with.
        padding_mode (`str`, *optional*, defaults to `"constant"`):
            The padding mode to use when padding the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image.
        image_mean (`float`, *optional*, defaults to 0.5):
            The mean to use when normalizing the image.
        image_std (`float`, *optional*, defaults to 0.5):
            The standard deviation to use when normalizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image.
        rescale_factor (`float`, *optional*, defaults to `1 / 255`):
            The factor to use when rescaling the image.
        patch_size (`dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`):
            Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
    """

    model_input_names = [
        "images",
        "image_input_ids",
        "image_patches",
        "image_patch_indices_per_batch",
        "image_patch_indices_per_subsequence",
    ]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_pad: bool = True,
        padding_value: float = 1.0,
        padding_mode: str = "constant",
        do_normalize: bool = True,
        image_mean: Union[float, list[float]] = 0.5,
        image_std: Union[float, list[float]] = 0.5,
        do_rescale: bool = True,
        rescale_factor: float = 1 / 255,
        patch_size: Optional[dict[str, int]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 1080, "width": 1920}
        self.resample = resample
        self.do_pad = do_pad
        self.padding_value = padding_value
        self.padding_mode = padding_mode
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30}

    def resize(
        self,
        image: np.ndarray,
        size: dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        image_height, image_width = get_image_size(image, input_data_format)
        target_height, target_width = size["height"], size["width"]

        # Images that already fit inside the target canvas are returned untouched.
        if image_width <= target_width and image_height <= target_height:
            return image

        # Otherwise downscale while preserving the aspect ratio.
        height_scale_factor = target_height / image_height
        width_scale_factor = target_width / image_width
        optimal_scale_factor = min(height_scale_factor, width_scale_factor)

        new_height = int(image_height * optimal_scale_factor)
        new_width = int(image_width * optimal_scale_factor)

        scaled_image = resize(
            image=image,
            size=(new_height, new_width),
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
        return scaled_image
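
    # Worked example of the scaling rule above (illustrative numbers, not from the original
    # file): a 2160x3840 input against the default 1080x1920 target gives scale factors
    # 1080/2160 = 0.5 and 1920/3840 = 0.5, so the image is resized to exactly 1080x1920,
    # while a 1080x960 input is returned unchanged because both sides already fit.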

    def pad_image(
        self,
        image: np.ndarray,
        size: dict[str, int],
        mode: str = "constant",
        constant_values: float = 1.0,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Pad an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to pad.
            size (`dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            data_format (`ChannelDimension` or `str`, *optional*):
                The data format of the output image. If unset, the same format as the input image is used.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        image_height, image_width = get_image_size(image, input_data_format)
        target_height, target_width = size["height"], size["width"]
        padding_top = 0
        padding_left = 0
        padding_bottom = target_height - image_height
        padding_right = target_width - image_width
        padded_image = pad(
            image,
            padding=((padding_top, padding_bottom), (padding_left, padding_right)),
            mode=mode,
            constant_values=constant_values,
            data_format=data_format,
            input_data_format=input_data_format,
        )
        return padded_image
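
    # Padding sketch (illustrative): `pad_image` only grows the bottom and right edges, so a
    # 540x960 image padded to the default 1080x1920 canvas keeps its pixels at the top-left and
    # fills the remaining 540 rows and 960 columns with `constant_values` (1.0 by default).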

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_pad: Optional[bool] = None,
        padding_value: Optional[float] = None,
        padding_mode: Optional[str] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[float] = None,
        image_std: Optional[float] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        patch_size: Optional[dict[str, int]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ):
        """
        Utility function to preprocess the images and extract necessary information about original formats.

        Args:
            images (`ImageInput`):
                Images to preprocess. Expects a single image, a list of images, or a list of lists of images. Pixel
                values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image to `size`.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image to `size`.
            padding_value (`float`, *optional*, defaults to `self.padding_value`):
                The value to pad the image with.
            padding_mode (`str`, *optional*, defaults to `self.padding_mode`):
                The padding mode to use when padding the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float`, *optional*, defaults to `self.image_mean`):
                The mean to use when normalizing the image.
            image_std (`float`, *optional*, defaults to `self.image_std`):
                The standard deviation to use when normalizing the image.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                The factor to use when rescaling the image.
            patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format of the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        resample = resample if resample is not None else self.resample
        do_pad = do_pad if do_pad is not None else self.do_pad
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        padding_value = padding_value if padding_value is not None else self.padding_value
        padding_mode = padding_mode if padding_mode is not None else self.padding_mode
        patch_size = patch_size if patch_size is not None else self.patch_size

        if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images):
            raise ValueError("Multiple images for a single sample are not yet supported.")

        batch_images = make_list_of_list_of_images(images)

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_pad=do_pad,
            size_divisibility=size,  # There is no pad divisibility in this processor, but pad requires the size arg.
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # All transformations expect numpy arrays.
        batch_images = [[to_numpy_array(image) for image in images] for images in batch_images]

        if do_rescale and is_scaled_image(batch_images[0][0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(batch_images[0][0])

        original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]

        if do_resize:
            batch_images = [
                [self.resize(image, size=size, input_data_format=input_data_format) for image in images]
                for images in batch_images
            ]

        image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
        image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]
        image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]

        # scale_h is the same as scale_w
        image_scale_factors = [
            [resized_size[0] / original_size[0]]
            for original_size, resized_size in zip(original_image_sizes, image_sizes)
        ]

        if do_pad:
            batch_images = [
                [
                    self.pad_image(
                        image,
                        size=size,
                        mode=padding_mode,
                        constant_values=padding_value,
                        input_data_format=input_data_format,
                    )
                    for image in images
                ]
                for images in batch_images
            ]

        if do_rescale:
            batch_images = [
                [self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
                for images in batch_images
            ]

        if do_normalize:
            batch_images = [
                [
                    self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                    for image in images
                ]
                for images in batch_images
            ]

        if data_format is not None:
            batch_images = [
                [to_channel_dimension_format(image, data_format, input_data_format) for image in images]
                for images in batch_images
            ]

        data = {
            "images": batch_images,
            "image_unpadded_heights": image_unpadded_heights,
            "image_unpadded_widths": image_unpadded_widths,
            "image_scale_factors": image_scale_factors,
        }
        return FuyuBatchFeature(data=data, tensor_type=return_tensors)
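
    # Minimal usage sketch (assumed shapes and values, added for illustration):
    #
    #     >>> import numpy as np
    #     >>> processor = FuyuImageProcessor()
    #     >>> image = np.random.randint(0, 256, (420, 640, 3), dtype=np.uint8)
    #     >>> batch = processor.preprocess(image, return_tensors="pt")
    #     >>> batch["images"][0][0].shape  # fits the canvas, so it is only padded
    #     torch.Size([3, 1080, 1920])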

    def get_num_patches(self, image_height: int, image_width: int, patch_size: Optional[dict[str, int]] = None) -> int:
        """
        Calculate number of patches required to encode an image.

        Args:
            image_height (`int`):
                Height of the image.
            image_width (`int`):
                Width of the image.
            patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
        """
        patch_size = patch_size if patch_size is not None else self.patch_size
        # Use the resolved `patch_size` argument rather than always falling back to `self.patch_size`.
        patch_height, patch_width = patch_size["height"], patch_size["width"]

        if image_height % patch_height != 0:
            raise ValueError(f"{image_height=} must be divisible by {patch_height}")
        if image_width % patch_width != 0:
            raise ValueError(f"{image_width=} must be divisible by {patch_width}")

        num_patches_per_dim_h = image_height // patch_height
        num_patches_per_dim_w = image_width // patch_width
        num_patches = num_patches_per_dim_h * num_patches_per_dim_w
        return num_patches
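
    # Patch-count arithmetic (illustrative): with the default 30x30 patches, the full
    # 1080x1920 canvas yields (1080 // 30) * (1920 // 30) = 36 * 64 = 2304 patches, and
    # patchify_image below flattens each patch to 3 * 30 * 30 = 2700 values.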

    def patchify_image(self, image: "torch.Tensor", patch_size: Optional[dict[str, int]] = None) -> "torch.Tensor":
        """
        Convert an image into a tensor of patches.

        Args:
            image (`torch.Tensor`):
                Image to convert. Shape: [batch, channels, height, width]
            patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
        """
        requires_backends(self, ["torch"])
        patch_size = patch_size if patch_size is not None else self.patch_size
        patch_height, patch_width = patch_size["height"], patch_size["width"]

        batch_size, channels, _, _ = image.shape
        # Cut the image into a grid of (patch_height, patch_width) tiles, then flatten each tile.
        unfolded_along_height = image.unfold(2, patch_height, patch_height)
        patches = unfolded_along_height.unfold(3, patch_width, patch_width)
        patches = patches.contiguous()
        patches = patches.view(batch_size, channels, -1, patch_height, patch_width)
        patches = patches.permute(0, 2, 3, 4, 1)
        patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)
        return patches

    def preprocess_with_tokenizer_info(
        self,
        image_input: "torch.Tensor",
        image_present: "torch.Tensor",
        image_unpadded_h: "torch.Tensor",
        image_unpadded_w: "torch.Tensor",
        image_placeholder_id: int,
        image_newline_id: int,
        variable_sized: bool,
        patch_size: Optional[dict[str, int]] = None,
    ) -> FuyuBatchFeature:
        """Process images for model input. In particular, variable-sized images are handled here.

        Args:
            image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):
                Tensor of images padded to model input size.
            image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):
                Tensor of 1s and 0s indicating whether an image is present.
            image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):
                Tensor of unpadded image heights.
            image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):
                Tensor of unpadded image widths.
            image_placeholder_id (int):
                The id of the image placeholder token. Comes from an associated tokenizer.
            image_newline_id (int):
                The id of the image newline token. Comes from an associated tokenizer.
            variable_sized (bool):
                Whether to process images as variable-sized.
            patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
                Size of the patches.
        """
        requires_backends(self, ["torch"])
        patch_size = patch_size if patch_size is not None else self.patch_size
        patch_height, patch_width = patch_size["height"], patch_size["width"]

        # Only images that are present.
        images: list[list[torch.Tensor]] = []
        batch_image_patches: list[list[torch.Tensor]] = []
        # Image input ids for every subsequence, including ones with no image present.
        batch_image_input_ids: list[list[torch.Tensor]] = []
        for batch_index in range(image_input.shape[0]):
            image_input_ids = []
            image_patches = []
            for subseq_index in range(image_input.shape[1]):
                if image_present[batch_index, subseq_index]:
                    image = image_input[batch_index, subseq_index]
                    image_height, image_width = image.shape[1], image.shape[2]
                    if variable_sized:
                        # Crop to the unpadded size, rounded up to a whole number of patches.
                        new_h = min(
                            image_height,
                            math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height,
                        )
                        new_w = min(
                            image_width,
                            math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width,
                        )
                        image = image[:, :new_h, :new_w]
                        image_height, image_width = new_h, new_w

                    num_patches = self.get_num_patches(image_height=image_height, image_width=image_width)
                    tensor_of_image_ids = torch.full(
                        [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device
                    )
                    patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0)
                    assert num_patches == patches.shape[0]

                    if variable_sized:
                        # Terminate each line of patches with |NEWLINE|.
                        tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)
                        newline_ids = torch.full(
                            [tensor_of_image_ids.shape[0], 1],
                            image_newline_id,
                            dtype=torch.int32,
                            device=image_input.device,
                        )
                        tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)
                        tensor_of_image_ids = tensor_of_image_ids.reshape(-1)

                    images.append([image])
                    image_input_ids.append(tensor_of_image_ids)
                    image_patches.append(patches)
                else:
                    image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))

            batch_image_input_ids.append(image_input_ids)
            batch_image_patches.append(image_patches)

        # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in
        # the stream.
        image_patch_indices_per_batch: list[list[torch.Tensor]] = []
        image_patch_indices_per_subsequence: list[list[torch.Tensor]] = []

        for sample_image_input_ids in batch_image_input_ids:
            index_offset = 0
            per_batch_indices = []
            per_subsequence_indices = []
            for subseq_image_input_ids in sample_image_input_ids:
                # Indices of image patches.
                patches_mask = subseq_image_input_ids == image_placeholder_id
                num_patches = torch.count_nonzero(patches_mask)
                indices = torch.arange(
                    num_patches, dtype=torch.int64, device=subseq_image_input_ids.device
                ).type_as(subseq_image_input_ids)

                # Place those indices in the image input ids token stream, with -1 representing non-index tokens.
                indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)
                indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)
                patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]

                indices_in_stream_per_batch[patches_inds] = indices + index_offset
                indices_in_stream_per_subsequence[patches_inds] = indices

                per_batch_indices.append(indices_in_stream_per_batch)
                per_subsequence_indices.append(indices_in_stream_per_subsequence)
                index_offset += num_patches

            image_patch_indices_per_batch.append(per_batch_indices)
            image_patch_indices_per_subsequence.append(per_subsequence_indices)

        return FuyuBatchFeature(
            data={
                "images": images,
                "image_input_ids": batch_image_input_ids,
                "image_patches": batch_image_patches,
                "image_patch_indices_per_batch": image_patch_indices_per_batch,
                "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence,
            }
        )


__all__ = ["FuyuImageProcessor"]
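

# A minimal end-to-end sketch of the two torch-side helpers above. It is illustrative only:
# the placeholder/newline ids follow the convention of the Fuyu tokenizer but are
# assumptions, not values read from a real checkpoint.
if __name__ == "__main__":
    import torch

    processor = FuyuImageProcessor()
    # One batch entry, one subsequence, one 3x60x90 image -> a 2x3 grid of 30x30 patches.
    batch = processor.preprocess_with_tokenizer_info(
        image_input=torch.rand(1, 1, 3, 60, 90),
        image_present=torch.ones(1, 1, 1),
        image_unpadded_h=torch.tensor([[60]]),
        image_unpadded_w=torch.tensor([[90]]),
        image_placeholder_id=71011,  # assumed id
        image_newline_id=71019,  # assumed id
        variable_sized=True,
    )
    print(batch["image_patches"][0][0].shape)  # torch.Size([6, 2700]) -- 6 patches of 3*30*30
    print(batch["image_input_ids"][0][0])  # 6 placeholder ids, a newline id after each row of 3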