
    rh"                    V   d Z ddlmZ ddlmZ ddlmZmZmZ ddl	Z	ddl
Z	ddl	mZmZ ddlmZ dd	lmZmZ dd
lmZ ddlmZmZ ddlmZ ddlmZmZmZmZmZ ddlm Z m!Z!m"Z"  e       rddl#m$Z$  ejJ                  e&      Z'de	j                  de	j                  fdZ(de	j                  de	j                  fdZ)ee G d de                    Z*dedefdZ+dedefdZ,d Z-d Z.e ed       G d  d!e                    Z/e ed"       G d# d$e                    Z0 G d% d&ejb                        Z2 G d' d(ejb                        Z3 G d) d*ejb                        Z4 G d+ d,ejb                        Z5 G d- d.e      Z6e G d/ d0e             Z7 G d1 d2ejb                        Z8 G d3 d4ejb                        Z9 G d5 d6e7      Z: G d7 d8ejb                        Z; G d9 d:e7      Z<e G d; d<e7             Z= G d= d>ejb                        Z> G d? d@ejb                        Z? G dA dBe7      Z@g dCZAy)DzPyTorch OWL-ViT model.    )	dataclass)	lru_cache)AnyOptionalUnionN)Tensornn   )ACT2FN) _create_4d_causal_attention_mask_prepare_4d_attention_mask)GradientCheckpointingLayer)BaseModelOutputBaseModelOutputWithPooling)PreTrainedModel)ModelOutputauto_docstringis_vision_availablelogging	torch_int   )OwlViTConfigOwlViTTextConfigOwlViTVisionConfig)center_to_corners_formatlogitsreturnc                     t         j                  j                  | t        j                  t        |       | j                              S )Ndevice)r	   
functionalcross_entropytorcharangelenr    )r   s    }/var/www/html/ai-insurance-compliance-backend/venv/lib/python3.12/site-packages/transformers/models/owlvit/modeling_owlvit.pycontrastive_lossr'   -   s/    ==&&vu||CKPVP]P]/^__    
similarityc                 Z    t        |       }t        | j                               }||z   dz  S )Ng       @)r'   t)r)   caption_loss
image_losss      r&   owlvit_lossr.   2   s,    #J/L!*,,.1J:%,,r(   c                      e Zd ZU dZdZeej                     ed<   dZ	eej                     ed<   dZ
eej                     ed<   dZeej                     ed<   dZeej                     ed<   dZeed<   dZeed	<   d
ee   fdZy)OwlViTOutputa  
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The image embeddings obtained by applying the projection layer to the pooled output of
        [`OwlViTVisionModel`].
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


def _upcast(t: Tensor) -> Tensor:
    # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
    if t.is_floating_point():
        return t if t.dtype in (torch.float32, torch.float64) else t.float()
    else:
        return t if t.dtype in (torch.int32, torch.int64) else t.int()


def box_area(boxes: Tensor) -> Tensor:
    """
    Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
            < x2` and `0 <= y1 < y2`.

    Returns:
        `torch.FloatTensor`: a tensor containing the area for each box.
    """
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def box_iou(boxes1: Tensor, boxes2: Tensor) -> tuple[Tensor, Tensor]:
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N, M, 2]
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]

    width_height = (right_bottom - left_top).clamp(min=0)  # [N, M, 2]
    inter = width_height[:, :, 0] * width_height[:, :, 1]  # [N, M]

    union = area1[:, None] + area2 - inter

    iou = inter / union
    return iou, union


def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.

    Returns:
        `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
    """
    # Degenerate boxes give inf / nan results, so do an early sanity check
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
    iou, union = box_iou(boxes1, boxes2)

    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    width_height = (bottom_right - top_left).clamp(min=0)  # [N, M, 2]
    area = width_height[:, :, 0] * width_height[:, :, 1]

    return iou - (union - area) / area
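

# Illustrative sketch (comment only, not executed): the utilities above expect boxes in
# (x0, y0, x1, y1) corner format; `center_to_corners_format` converts the model's
# (center_x, center_y, width, height) predictions before IoU/GIoU can be computed.
# The box values below are made up for the example.
#
#   boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.5, 0.5, 2.0, 2.0]])
#   boxes2 = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
#   iou, union = box_iou(boxes1, boxes2)        # each of shape (2, 1)
#   giou = generalized_box_iou(boxes1, boxes2)  # shape (2, 1), values in [-1, 1]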


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`OwlViTForObjectDetection`].
    """
)
class OwlViTObjectDetectionOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
        possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[dict] = None
    logits: Optional[torch.FloatTensor] = None
    pred_boxes: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`OwlViTForObjectDetection.image_guided_detection`].
    """
)
class OwlViTImageGuidedObjectDetectionOutput(ModelOutput):
    r"""
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual target image in the batch
        (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual query image in the batch
        (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    """

    logits: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    query_image_embeds: Optional[torch.FloatTensor] = None
    target_pred_boxes: Optional[torch.FloatTensor] = None
    query_pred_boxes: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


class OwlViTVisionEmbeddings(nn.Module):
    def __init__(self, config: OwlViTVisionConfig):
        super().__init__()
        self.patch_size = config.patch_size
        self.config = config
        self.embed_dim = config.hidden_size
        self.class_embedding = nn.Parameter(torch.randn(config.hidden_size))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=config.patch_size,
            stride=config.patch_size,
            bias=False,
        )

        self.num_patches = (config.image_size // config.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [batch_size, embed_dim, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings


class OwlViTTextEmbeddings(nn.Module):
    def __init__(self, config: OwlViTTextConfig):
        super().__init__()
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings


class OwlViTAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # reshape so that the returned attention weights keep their gradient
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # For int8 compatibility, sometimes the `attn_probs` are in `fp32`
        attn_probs = attn_probs.to(value_states.dtype)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped


class OwlViTMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class OwlViTEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: OwlViTConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = OwlViTAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = OwlViTMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


@auto_docstring
class OwlViTPreTrainedModel(PreTrainedModel):
    config: OwlViTConfig
    base_model_prefix = "owlvit"
    supports_gradient_checkpointing = True
    _no_split_modules = ["OwlViTEncoderLayer"]

    def _init_weights(self, module: nn.Module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, OwlViTTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, OwlViTVisionEmbeddings):
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, OwlViTAttention):
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, OwlViTMLP):
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, OwlViTModel):
            nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim**-0.5 * factor)
            nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * factor)
            module.logit_scale.data.fill_(self.config.logit_scale_init_value)
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=factor)
            if module.bias is not None:
                module.bias.data.zero_()


class OwlViTEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`OwlViTEncoderLayer`].

    Args:
        config: OwlViTConfig
    """

    def __init__(self, config: OwlViTConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([OwlViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                causal_attention_mask,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class OwlViTTextTransformer(nn.Module):
    def __init__(self, config: OwlViTTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = OwlViTTextEmbeddings(config)
        self.encoder = OwlViTEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # OWL-ViT's text model uses a causal mask, prepare it here.
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )
        # expand attention_mask: [num_samples, seq_len] -> [num_samples, 1, tgt_seq_len, src_seq_len]
        if attention_mask is not None:
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # take features from the end-of-sequence token embedding (the highest token id in each sequence);
        # cast to torch.int for ONNX compatibility (argmax does not support int64 inputs with opset 14)
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device),
        ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class OwlViTTextModel(OwlViTPreTrainedModel):
    config: OwlViTTextConfig

    def __init__(self, config: OwlViTTextConfig):
        super().__init__(config)
        self.text_model = OwlViTTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Examples:
        ```python
        >>> from transformers import AutoProcessor, OwlViTTextModel

        >>> model = OwlViTTextModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        # Get embeddings for all text queries in all batch samples
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class OwlViTVisionTransformer(nn.Module):
    def __init__(self, config: OwlViTVisionConfig):
        super().__init__()
        self.config = config

        self.embeddings = OwlViTVisionEmbeddings(config)
        self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.encoder = OwlViTEncoder(config)
        self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Cast the input to the expected `dtype`
        expected_input_dtype = self.embeddings.patch_embedding.weight.dtype
        pixel_values = pixel_values.to(expected_input_dtype)

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layernorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]

        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class OwlViTVisionModel(OwlViTPreTrainedModel):
    config: OwlViTVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: OwlViTVisionConfig):
        super().__init__(config)
        self.vision_model = OwlViTVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTVisionModel

        >>> model = OwlViTVisionModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )


@auto_docstring
class OwlViTModel(OwlViTPreTrainedModel):
    config: OwlViTConfig

    def __init__(self, config: OwlViTConfig):
        super().__init__(config)

        if not isinstance(config.text_config, OwlViTTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type OwlViTTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, OwlViTVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type OwlViTVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size
        self.text_model = OwlViTTextTransformer(text_config)
        self.vision_model = OwlViTVisionTransformer(vision_config)
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`OwlViTTextModel`].

        Examples:
        ```python
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use the OWL-ViT model's config for some fields (if specified) instead of those of the text component.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Get embeddings for all text queries in all batch samples
        text_output = self.text_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict)
        pooled_output = text_output[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @auto_docstring
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`OwlViTVisionModel`].

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use the OWL-ViT model's config for some fields (if specified) instead of those of the vision component.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )
        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)

        return image_features

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_base_image_embeds: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, OwlViTOutput]:
        r"""
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        return_base_image_embeds (`bool`, *optional*):
            Whether or not to return the base image embeddings.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use the OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        # Get embeddings for all text queries in all batch samples
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        # normalized features
        image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
        text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)

        # cosine similarity as logits, computed on the correct device
        logit_scale = self.logit_scale.exp().to(image_embeds.device)

        logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = owlvit_loss(logits_per_text)

        text_embeds = text_embeds_norm

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return OwlViTOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


class OwlViTBoxPredictionHead(nn.Module):
    def __init__(self, config: OwlViTConfig, out_dim: int = 4):
        super().__init__()

        width = config.vision_config.hidden_size
        self.dense0 = nn.Linear(width, width)
        self.dense1 = nn.Linear(width, width)
        self.gelu = nn.GELU()
        self.dense2 = nn.Linear(width, out_dim)

    def forward(self, image_features: torch.Tensor) -> torch.FloatTensor:
        output = self.dense0(image_features)
        output = self.gelu(output)
        output = self.dense1(output)
        output = self.gelu(output)
        output = self.dense2(output)
        return output


class OwlViTClassPredictionHead(nn.Module):
    def __init__(self, config: OwlViTConfig):
        super().__init__()

        out_dim = config.text_config.hidden_size
        self.query_dim = config.vision_config.hidden_size

        self.dense0 = nn.Linear(self.query_dim, out_dim)
        self.logit_shift = nn.Linear(self.query_dim, 1)
        self.logit_scale = nn.Linear(self.query_dim, 1)
        self.elu = nn.ELU()

    def forward(
        self,
        image_embeds: torch.FloatTensor,
        query_embeds: Optional[torch.FloatTensor],
        query_mask: Optional[torch.Tensor],
    ) -> tuple[torch.FloatTensor]:
        image_class_embeds = self.dense0(image_embeds)
        if query_embeds is None:
            device = image_class_embeds.device
            batch_size, num_patches = image_class_embeds.shape[:2]
            pred_logits = torch.zeros((batch_size, num_patches, self.query_dim)).to(device)
            return (pred_logits, image_class_embeds)

        # Normalize image and text features
        image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-6)
        query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-6)

        # Get class predictions
        pred_logits = torch.einsum("...pd,...qd->...pq", image_class_embeds, query_embeds)

        # Apply a learnable shift and scale to logits
        logit_shift = self.logit_shift(image_embeds)
        logit_scale = self.logit_scale(image_embeds)
        logit_scale = self.elu(logit_scale) + 1
        pred_logits = (pred_logits + logit_shift) * logit_scale

        if query_mask is not None:
            if query_mask.ndim > 1:
                query_mask = torch.unsqueeze(query_mask, dim=-2)

            pred_logits = torch.where(query_mask == 0, torch.finfo(pred_logits.dtype).min, pred_logits)
            pred_logits = pred_logits.to(torch.float32)

        return (pred_logits, image_class_embeds)


class OwlViTForObjectDetection(OwlViTPreTrainedModel):
    config: OwlViTConfig

    def __init__(self, config: OwlViTConfig):
        super().__init__(config)

        self.owlvit = OwlViTModel(config)
        self.class_head = OwlViTClassPredictionHead(config)
        self.box_head = OwlViTBoxPredictionHead(config)

        self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps)
        self.sigmoid = nn.Sigmoid()

        self.config = config
        self.num_patches_height = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.num_patches_width = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.box_bias = self.compute_box_bias(self.num_patches_height, self.num_patches_width)

    @staticmethod
    def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
        # Create grid coordinates using torch
        x_coordinates = torch.arange(1, num_patches_width + 1, dtype=torch.float32)
        y_coordinates = torch.arange(1, num_patches_height + 1, dtype=torch.float32)
        xx, yy = torch.meshgrid(x_coordinates, y_coordinates, indexing="xy")

        # Stack the coordinates and normalize by the respective number of patches
        box_coordinates = torch.stack((xx, yy), dim=-1)
        box_coordinates[..., 0] /= num_patches_width
        box_coordinates[..., 1] /= num_patches_height

        # Flatten (num_patches_height, num_patches_width, 2) -> (num_patches_height * num_patches_width, 2)
        box_coordinates = box_coordinates.view(-1, 2)

        return box_coordinates

    @lru_cache(maxsize=2)
    def compute_box_bias(
        self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor] = None
    ) -> torch.Tensor:
        if feature_map is not None:
            raise ValueError("feature_map has been deprecated as an input. Please pass in num_patches instead")
        # The box center is biased to its position on the feature grid
        box_coordinates = self.normalize_grid_corner_coordinates(num_patches_height, num_patches_width)
        box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)

        # Unnormalize xy
        box_coord_bias = torch.log(box_coordinates + 1e-4) - torch.log1p(-box_coordinates + 1e-4)

        # The box size is biased to the patch size
        box_size = torch.full_like(box_coord_bias, 1.0)
        box_size[..., 0] /= num_patches_width
        box_size[..., 1] /= num_patches_height
        box_size_bias = torch.log(box_size + 1e-4) - torch.log1p(-box_size + 1e-4)

        # Compute box bias
        box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)
        return box_bias

    def box_predictor(
        self,
        image_feats: torch.FloatTensor,
        feature_map: torch.FloatTensor,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        """
        Args:
            image_feats:
                Features extracted from the image, returned by the `image_text_embedder` method.
            feature_map:
                A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
            interpolate_pos_encoding:
                Whether to interpolate the pre-trained position encodings.
        Returns:
            pred_boxes:
                Predicted boxes in (center_x, center_y, width, height) format, normalized to [0, 1],
                with shape `(batch_size, num_patches, 4)`.
        """
        # Bounding box detection head [batch_size, num_boxes, 4].
        pred_boxes = self.box_head(image_feats)

        # Compute the location of each token on the grid and use it to compute a bias for the bbox prediction
        if interpolate_pos_encoding:
            _, num_patches_height, num_patches_width, _ = feature_map.shape
            box_bias = self.compute_box_bias(num_patches_height, num_patches_width)
        else:
            box_bias = self.box_bias
        box_bias = box_bias.to(feature_map.device)
        pred_boxes += box_bias
        pred_boxes = self.sigmoid(pred_boxes)
        return pred_boxes

    def class_predictor(
        self,
        image_feats: torch.FloatTensor,
        query_embeds: Optional[torch.FloatTensor] = None,
        query_mask: Optional[torch.Tensor] = None,
    ) -> tuple[torch.FloatTensor]:
        """
        Args:
            image_feats:
                Features extracted from the `image_text_embedder`.
            query_embeds:
                Text query embeddings.
            query_mask:
                Must be provided together with `query_embeds`. A mask indicating which query embeddings are valid.
        """
        (pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask)

        return (pred_logits, image_class_embeds)

    def image_text_embedder(
        self,
        input_ids: torch.Tensor,
        pixel_values: torch.FloatTensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> tuple[torch.FloatTensor]:
        # Encode text and image
        outputs = self.owlvit(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )

        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width

        # Get image embeddings
        last_hidden_state = outputs.vision_model_output[0]
        image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)

        # Resize class token
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)

        # Merge image embedding with class tokens
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)

        # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size]
        new_size = (
            image_embeds.shape[0],
            num_patches_height,
            num_patches_width,
            image_embeds.shape[-1],
        )
        image_embeds = image_embeds.reshape(new_size)
        text_embeds = outputs[-4]

        return (text_embeds, image_embeds, outputs)

    def image_embedder(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> tuple[torch.FloatTensor]:
        # Get OwlViTModel vision embeddings (same as CLIP)
        vision_outputs = self.owlvit.vision_model(
            pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True
        )

        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width

        # Apply post_layernorm to last_hidden_state, return non-projected output
        last_hidden_state = vision_outputs[0]
        image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)

        # Resize class token
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)

        # Merge image embedding with class tokens
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)

        # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size]
        new_size = (
            image_embeds.shape[0],
            num_patches_height,
            num_patches_width,
            image_embeds.shape[-1],
        )
        image_embeds = image_embeds.reshape(new_size)

        return (image_embeds, vision_outputs)

    def embed_image_query(
        self,
        query_image_features: torch.FloatTensor,
        query_feature_map: torch.FloatTensor,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        _, class_embeds = self.class_predictor(query_image_features)
        pred_boxes = self.box_predictor(query_image_features, query_feature_map, interpolate_pos_encoding)
        pred_boxes_as_corners = center_to_corners_format(pred_boxes)

        # Loop over query images
        best_class_embeds = []
        best_box_indices = []
        pred_boxes_device = pred_boxes_as_corners.device

        for i in range(query_image_features.shape[0]):
            each_query_box = torch.tensor([[0, 0, 1, 1]], device=pred_boxes_device)
            each_query_pred_boxes = pred_boxes_as_corners[i]
            ious, _ = box_iou(each_query_box, each_query_pred_boxes)

            # If there are no overlapping boxes, fall back to generalized IoU
            if torch.all(ious[0] == 0.0):
                ious = generalized_box_iou(each_query_box, each_query_pred_boxes)

            # Use an adaptive threshold to include all boxes within 80% of the best IoU
            iou_threshold = torch.max(ious) * 0.8

            selected_inds = (ious[0] >= iou_threshold).nonzero()
            if selected_inds.numel():
                selected_embeddings = class_embeds[i][selected_inds.squeeze(1)]
                mean_embeds = torch.mean(class_embeds[i], axis=0)
                mean_sim = torch.einsum("d,id->i", mean_embeds, selected_embeddings)
                best_box_ind = selected_inds[torch.argmin(mean_sim)]
                best_class_embeds.append(class_embeds[i][best_box_ind])
                best_box_indices.append(best_box_ind)

        if best_class_embeds:
            query_embeds = torch.stack(best_class_embeds)
            box_indices = torch.stack(best_box_indices)
        else:
            query_embeds, box_indices = None, None

        return query_embeds, box_indices, pred_boxes

    @auto_docstring
    def image_guided_detection(
        self,
        pixel_values: torch.FloatTensor,
        query_pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> OwlViTImageGuidedObjectDetectionOutput:
        r"""
        query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values of query image(s) to be detected. Pass in one query image per target image.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import AutoProcessor, OwlViTForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch16")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
        >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
        >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model.image_guided_detection(**inputs)
        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.Tensor([image.size[::-1]])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_image_guided_detection(
        ...     outputs=outputs, threshold=0.6, nms_threshold=0.3, target_sizes=target_sizes
        ... )
        >>> i = 0  # Retrieve predictions for the first image
        >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
        >>> for box, score in zip(boxes, scores):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
        Detected similar object with confidence 0.856 at location [10.94, 50.4, 315.8, 471.39]
        Detected similar object with confidence 1.0 at location [334.84, 25.33, 636.16, 374.71]
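
        >>> # Beyond the processor helper above, the raw fields on the returned
        >>> # `OwlViTImageGuidedObjectDetectionOutput` can be read directly; the lines below are only a sketch of
        >>> # the shapes involved. Boxes are (center_x, center_y, width, height), normalized to [0, 1].
        >>> target_boxes = outputs.target_pred_boxes  # shape (batch_size, num_patches, 4)
        >>> query_boxes = outputs.query_pred_boxes  # boxes predicted on the query image, same layout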
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Compute feature maps for the input and query images
        query_feature_map = self.image_embedder(
            pixel_values=query_pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
        )[0]
        feature_map, vision_outputs = self.image_embedder(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))

        batch_size, num_patches_height, num_patches_width, hidden_dim = query_feature_map.shape
        query_image_feats = torch.reshape(
            query_feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim)
        )
        # Get top class embedding and best box index for each query image in batch
        query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query(
            query_image_feats, query_feature_map, interpolate_pos_encoding
        )

        # Predict object classes [batch_size, num_patches, num_queries+1]
        (pred_logits, class_embeds) = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds)

        # Predict object boxes
        target_pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)

        if not return_dict:
            output = (
                feature_map,
                query_feature_map,
                target_pred_boxes,
                query_pred_boxes,
                pred_logits,
                class_embeds,
                vision_outputs.to_tuple(),
            )
            output = tuple(x for x in output if x is not None)
            return output

        return OwlViTImageGuidedObjectDetectionOutput(
            image_embeds=feature_map,
            query_image_embeds=query_feature_map,
            target_pred_boxes=target_pred_boxes,
            query_pred_boxes=query_pred_boxes,
            logits=pred_logits,
            class_embeds=class_embeds,
            text_model_output=None,
            vision_model_output=vision_outputs,
        )

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        pixel_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> OwlViTObjectDetectionOutput:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids).
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `text_model_output` and
            `vision_model_output` under returned tensors for more detail.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch

        >>> from transformers import OwlViTProcessor, OwlViTForObjectDetection

        >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
        >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.tensor([(image.height, image.width)])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
        ... )
        >>> # Retrieve predictions for the first image for the corresponding text queries
        >>> result = results[0]
        >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
        >>> for box, score, text_label in zip(boxes, scores, text_labels):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
        Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29]
        Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17]
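
        >>> # The undecoded model outputs are also available on `OwlViTObjectDetectionOutput` if you want to skip
        >>> # the processor; the lines below are just a sketch of the tensor shapes involved.
        >>> class_logits = outputs.logits  # (batch_size, num_patches, num_text_queries)
        >>> boxes_cxcywh = outputs.pred_boxes  # (batch_size, num_patches, 4), normalized (cx, cy, w, h)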
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Embed images and text queries
        query_embeds, feature_map, outputs = self.image_text_embedder(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        # Text and vision model outputs
        text_outputs = outputs.text_model_output
        vision_outputs = outputs.vision_model_output

        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))

        # Reshape from [batch_size * max_text_queries, hidden_dim] -> [batch_size, max_text_queries, hidden_dim]
        max_text_queries = input_ids.shape[0] // batch_size
        query_embeds = query_embeds.reshape(batch_size, max_text_queries, query_embeds.shape[-1])

        # If the first token is 0, then this is a padded query [batch_size, num_queries]
        input_ids = input_ids.reshape(batch_size, max_text_queries, input_ids.shape[-1])
        query_mask = input_ids[..., 0] > 0

        # Predict object classes [batch_size, num_patches, num_queries+1]
        (pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds, query_mask)

        # Predict object boxes
        pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)

        if not return_dict:
            output = (
                pred_logits,
                pred_boxes,
                query_embeds,
                feature_map,
                class_embeds,
                text_outputs.to_tuple(),
                vision_outputs.to_tuple(),
            )
            output = tuple(x for x in output if x is not None)
            return output

        return OwlViTObjectDetectionOutput(
            image_embeds=feature_map,
            text_embeds=query_embeds,
            pred_boxes=pred_boxes,
            logits=pred_logits,
            class_embeds=class_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


__all__ = ["OwlViTModel", "OwlViTPreTrainedModel", "OwlViTTextModel", "OwlViTVisionModel", "OwlViTForObjectDetection"]
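

# A minimal, self-contained sketch (kept as a comment so importing this module stays side-effect free, and not part
# of the model API) of how the box-bias parameterization used by `compute_box_bias` and `box_predictor` behaves:
# the bias is the logit of a grid position, so a zero output from the box head decodes back to (approximately) the
# centre of the corresponding patch cell. The 0.25 value below is a hypothetical normalized patch centre.
#
#     import torch
#
#     coord = torch.tensor([0.25])
#     bias = torch.log(coord + 1e-4) - torch.log1p(-coord + 1e-4)  # same transform as compute_box_bias
#     torch.sigmoid(bias)  # ~0.25, i.e. sigmoid(head_output + bias) recovers the grid position when head_output == 0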