
    rh0                        d Z ddlmZ ddlmZmZ ddlZddlZddlmZ ddl	m
Z
 ddlmZmZmZ dd	lmZ d
dlmZ  ej&                  e      Ze ed       G d de                    Ze G d de
             ZddZ G d dej2                        Z G d dej2                        Z ed       G d de             ZddgZy)zPyTorch VitPose model.    )	dataclass)OptionalUnionN)nn   )PreTrainedModel)ModelOutputauto_docstringlogging)load_backbone   )VitPoseConfigz6
    Class for outputs of pose estimation models.
    """
)
class VitPoseEstimatorOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Loss is not supported at this moment. See https://github.com/ViTAE-Transformer/ViTPose/tree/main/mmpose/models/losses for further detail.
    heatmaps (`torch.FloatTensor` of shape `(batch_size, num_keypoints, height, width)`):
        Heatmaps as predicted by the model.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
        (also called feature maps) of the model at the output of each stage.
    Nlossheatmaps.hidden_states
attentions)__name__
__module____qualname____doc__r   r   torchFloatTensor__annotations__r   r   tupler        /var/www/html/ai-insurance-compliance-backend/venv/lib/python3.12/site-packages/transformers/models/vitpose/modeling_vitpose.pyr   r   '   sq    	 )-D(5$$
%,,0Hhu(()0=AM8E%"3"3S"89:A:>Ju00#567>r   r   c                   |    e Zd ZU eed<   dZdZdZdee	j                  e	j                  e	j                  f   ddfdZy)	VitPosePreTrainedModelconfigvitpixel_valuesTmodulereturnNc                    t        |t        j                  t        j                  f      rt        j                  j                  |j                  j                  j                  t        j                        d| j                  j                        j                  |j                  j                        |j                  _        |j                  %|j                  j                  j                          yyt        |t        j                         rJ|j                  j                  j                          |j                  j                  j#                  d       yy)zInitialize the weightsg        )meanstdNg      ?)
isinstancer   LinearConv2dinittrunc_normal_weightdatator   float32r#   initializer_rangedtypebiaszero_	LayerNormfill_)selfr&   s     r    _init_weightsz$VitPosePreTrainedModel._init_weightsF   s    fryy"))45 "$!6!6""%%emm43DKKDaDa "7 "b$$% MM {{&  &&( '-KK""$MM$$S) .r   )r   r   r   r   r   base_model_prefixmain_input_namesupports_gradient_checkpointingr   r   r,   r-   r8   r;   r   r   r    r"   r"   ?   sE    $O&*#*E"))RYY*L$M *RV *r   r"   c                    |dvrt        d      | j                  dk7  rt        d      | j                  \  }}}}d}|dk(  rd}| dddddd	f    | dddddd	f<   | j                  |d
|||      } | j	                         }|j                         D ])  \  }	}
| dd|
d	f   |dd|	d	f<   | dd|	d	f   |dd|
d	f<   + |j                  ||||f      }|j                  d
      }|S )a  Flip the flipped heatmaps back to the original form.

    Args:
        output_flipped (`torch.tensor` of shape `(batch_size, num_keypoints, height, width)`):
            The output heatmaps obtained from the flipped images.
        flip_pairs (`torch.Tensor` of shape `(num_keypoints, 2)`):
            Pairs of keypoints which are mirrored (for example, left ear -- right ear).
        target_type (`str`, *optional*, defaults to `"gaussian-heatmap"`):
            Target type to use. Can be gaussian-heatmap or combined-target.
            gaussian-heatmap: Classification target with gaussian distribution.
            combined-target: The combination of classification target (response map) and regression target (offset map).
            Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).

    Returns:
        torch.Tensor: heatmaps that flipped back to the original image
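
    Example (a minimal sketch; the flip pairs below are illustrative and do not correspond to a real keypoint layout):

    ```python
    >>> import torch
    >>> heatmaps = torch.rand(1, 4, 64, 48)  # (batch_size, num_keypoints, height, width)
    >>> flip_pairs = torch.tensor([[0, 1], [2, 3]])  # pairs of mirrored keypoint indices
    >>> flip_back(heatmaps, flip_pairs).shape
    torch.Size([1, 4, 64, 48])
    ```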
    """
    if target_type not in ["gaussian-heatmap", "combined-target"]:
        raise ValueError("target_type should be gaussian-heatmap or combined-target")

    if output_flipped.ndim != 4:
        raise ValueError("output_flipped should be [batch_size, num_keypoints, height, width]")

    batch_size, num_keypoints, height, width = output_flipped.shape
    channels = 1
    if target_type == "combined-target":
        channels = 3
        output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...]
    output_flipped = output_flipped.reshape(batch_size, -1, channels, height, width)
    output_flipped_back = output_flipped.clone()

    # Swap the mirrored keypoint channels according to flip_pairs
    for left, right in flip_pairs.tolist():
        output_flipped_back[:, left, ...] = output_flipped[:, right, ...]
        output_flipped_back[:, right, ...] = output_flipped[:, left, ...]
    output_flipped_back = output_flipped_back.reshape((batch_size, num_keypoints, height, width))
    # Flip the heatmaps back along the horizontal (width) axis
    output_flipped_back = output_flipped_back.flip(-1)

    return output_flipped_back


class VitPoseSimpleDecoder(nn.Module):
    """
    Simple decoding head consisting of a ReLU activation, 4x upsampling and a 3x3 convolution, turning the
    feature maps into heatmaps.
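
    Roughly speaking, the head is equivalent to the following sketch; the sizes (768 hidden channels,
    17 keypoints, 4x upsampling) are illustrative assumptions and in practice come from
    `config.backbone_config.hidden_size`, `config.num_labels` and `config.scale_factor`:

    ```python
    import torch.nn as nn

    head = nn.Sequential(
        nn.ReLU(),
        nn.Upsample(scale_factor=4, mode="bilinear", align_corners=False),
        nn.Conv2d(768, 17, kernel_size=3, stride=1, padding=1),  # hidden_size -> num_keypoints
    )
    ```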
    """

    def __init__(self, config) -> None:
        super().__init__()

        self.activation = nn.ReLU()
        self.upsampling = nn.Upsample(scale_factor=config.scale_factor, mode="bilinear", align_corners=False)
        self.conv = nn.Conv2d(
            config.backbone_config.hidden_size, config.num_labels, kernel_size=3, stride=1, padding=1
        )

    def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None) -> torch.Tensor:
        hidden_state = self.activation(hidden_state)
        hidden_state = self.upsampling(hidden_state)
        heatmaps = self.conv(hidden_state)

        if flip_pairs is not None:
            heatmaps = flip_back(heatmaps, flip_pairs)

        return heatmaps


class VitPoseClassicDecoder(nn.Module):
    """
    Classic decoding head consisting of two deconvolutional blocks, followed by a 1x1 convolution layer,
    turning the feature maps into heatmaps.
    """

    def __init__(self, config: VitPoseConfig):
        super().__init__()

        self.deconv1 = nn.ConvTranspose2d(
            config.backbone_config.hidden_size, 256, kernel_size=4, stride=2, padding=1, bias=False
        )
        self.batchnorm1 = nn.BatchNorm2d(256)
        self.relu1 = nn.ReLU()

        self.deconv2 = nn.ConvTranspose2d(256, 256, kernel_size=4, stride=2, padding=1, bias=False)
        self.batchnorm2 = nn.BatchNorm2d(256)
        self.relu2 = nn.ReLU()

        self.conv = nn.Conv2d(256, config.num_labels, kernel_size=1, stride=1, padding=0)

    def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None):
        hidden_state = self.deconv1(hidden_state)
        hidden_state = self.batchnorm1(hidden_state)
        hidden_state = self.relu1(hidden_state)

        hidden_state = self.deconv2(hidden_state)
        hidden_state = self.batchnorm2(hidden_state)
        hidden_state = self.relu2(hidden_state)

        heatmaps = self.conv(hidden_state)

        if flip_pairs is not None:
            heatmaps = flip_back(heatmaps, flip_pairs)

        return heatmaps


@auto_docstring(
    custom_intro="""
    The VitPose model with a pose estimation head on top.
    """
)
class VitPoseForPoseEstimation(VitPosePreTrainedModel):
    def __init__(self, config: VitPoseConfig) -> None:
        super().__init__(config)

        self.backbone = load_backbone(config)

        # The decoding heads rely on these backbone attributes being present
        if not hasattr(self.backbone.config, "hidden_size"):
            raise ValueError("The backbone should have a hidden_size attribute")
        if not hasattr(self.backbone.config, "image_size"):
            raise ValueError("The backbone should have an image_size attribute")
        if not hasattr(self.backbone.config, "patch_size"):
            raise ValueError("The backbone should have a patch_size attribute")

        self.head = VitPoseSimpleDecoder(config) if config.use_simple_decoder else VitPoseClassicDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor,
        dataset_index: Optional[torch.Tensor] = None,
        flip_pairs: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, VitPoseEstimatorOutput]:
        r"""
        dataset_index (`torch.Tensor` of shape `(batch_size,)`):
            Index to use in the Mixture-of-Experts (MoE) blocks of the backbone.

            This corresponds to the dataset index used during training. With a single training dataset, index 0 refers to that dataset; with multiple datasets, index 0 refers to dataset A (e.g. MPII) and index 1 refers to dataset B (e.g. CrowdPose).
        flip_pairs (`torch.tensor`, *optional*):
            Pairs of keypoints which are mirrored (for example, left ear -- right ear); when provided, the predicted heatmaps are flipped back accordingly.

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, VitPoseForPoseEstimation
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
        >>> model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]]
        >>> inputs = processor(image, boxes=boxes, return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> heatmaps = outputs.heatmaps
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        loss = None
        if labels is not None:
            raise NotImplementedError("Training is not yet supported")

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values,
            dataset_index=dataset_index,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        # Turn the last feature map into a tensor of shape (batch_size, hidden_size, patch_height, patch_width)
        sequence_output = outputs.feature_maps[-1] if return_dict else outputs[0][-1]
        batch_size = sequence_output.shape[0]
        patch_height = self.config.backbone_config.image_size[0] // self.config.backbone_config.patch_size[0]
        patch_width = self.config.backbone_config.image_size[1] // self.config.backbone_config.patch_size[1]
        sequence_output = (
            sequence_output.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width).contiguous()
        )

        heatmaps = self.head(sequence_output, flip_pairs=flip_pairs)

        if not return_dict:
            if output_hidden_states:
                output = (heatmaps,) + outputs[1:]
            else:
                output = (heatmaps,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return VitPoseEstimatorOutput(
            loss=loss,
            heatmaps=heatmaps,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["VitPosePreTrainedModel", "VitPoseForPoseEstimation"]