"""PyTorch TextNet model."""

from typing import Any, Optional, Union

import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers import PreTrainedModel
from transformers.activations import ACT2CLS
from transformers.modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from transformers.models.textnet.configuration_textnet import TextNetConfig
from transformers.utils import logging
from transformers.utils.backbone_utils import BackboneMixin

from ...utils import auto_docstring


logger = logging.get_logger(__name__)


class TextNetConvLayer(nn.Module):
    def __init__(self, config: TextNetConfig):
        super().__init__()

        self.kernel_size = config.stem_kernel_size
        self.stride = config.stem_stride
        self.activation_function = config.stem_act_func

        padding = (
            (config.stem_kernel_size[0] // 2, config.stem_kernel_size[1] // 2)
            if isinstance(config.stem_kernel_size, tuple)
            else config.stem_kernel_size // 2
        )

        self.conv = nn.Conv2d(
            config.stem_num_channels,
            config.stem_out_channels,
            kernel_size=config.stem_kernel_size,
            stride=config.stem_stride,
            padding=padding,
            bias=False,
        )
        self.batch_norm = nn.BatchNorm2d(config.stem_out_channels, config.batch_norm_eps)

        self.activation = nn.Identity()
        if self.activation_function is not None:
            self.activation = ACT2CLS[self.activation_function]()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.conv(hidden_states)
        hidden_states = self.batch_norm(hidden_states)
        return self.activation(hidden_states)


class TextNetRepConvLayer(nn.Module):
    r"""
    This layer supports re-parameterization by combining multiple convolutional branches
    (e.g., main convolution, vertical, horizontal, and identity branches) during training.
    At inference time, these branches can be collapsed into a single convolution for
    efficiency, as per the re-parameterization paradigm.

    The "Rep" in the name stands for "re-parameterization" (introduced by RepVGG).
    """

    def __init__(self, config: TextNetConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int):
        super().__init__()

        self.num_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride

        padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2)

        self.activation_function = nn.ReLU()

        self.main_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
        )
        self.main_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps)

        vertical_padding = ((kernel_size[0] - 1) // 2, 0)
        horizontal_padding = (0, (kernel_size[1] - 1) // 2)

        if kernel_size[1] != 1:
            self.vertical_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(kernel_size[0], 1),
                stride=stride,
                padding=vertical_padding,
                bias=False,
            )
            self.vertical_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps)
        else:
            self.vertical_conv, self.vertical_batch_norm = None, None

        if kernel_size[0] != 1:
            self.horizontal_conv = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(1, kernel_size[1]),
                stride=stride,
                padding=horizontal_padding,
                bias=False,
            )
            self.horizontal_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps)
        else:
            self.horizontal_conv, self.horizontal_batch_norm = None, None

        self.rbr_identity = (
            nn.BatchNorm2d(num_features=in_channels, eps=config.batch_norm_eps)
            if out_channels == in_channels and stride == 1
            else None
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        main_outputs = self.main_conv(hidden_states)
        main_outputs = self.main_batch_norm(main_outputs)

        # applies a convolution with a vertical kernel
        if self.vertical_conv is not None:
            vertical_outputs = self.vertical_conv(hidden_states)
            vertical_outputs = self.vertical_batch_norm(vertical_outputs)
            main_outputs = main_outputs + vertical_outputs

        # applies a convolution with a horizontal kernel
        if self.horizontal_conv is not None:
            horizontal_outputs = self.horizontal_conv(hidden_states)
            horizontal_outputs = self.horizontal_batch_norm(horizontal_outputs)
            main_outputs = main_outputs + horizontal_outputs

        if self.rbr_identity is not None:
            id_out = self.rbr_identity(hidden_states)
            main_outputs = main_outputs + id_out

        return self.activation_function(main_outputs)
                  |   }|j
                  |dz      }|g|g|dz
  z  z   }|g|z  }	g }
t        ||	||      D ]  }|
j                  t        |g|         t        j                  |
      | _        y )Nr   )r   r    conv_layer_kernel_sizesconv_layer_strideslenhidden_sizeszipappendrA   r'   
ModuleListstage)r1   r   r\   r   r   
num_layersstage_in_channel_sizestage_out_channel_sizerB   rC   re   stage_configr2   s               r3   r    zTextNetStage.__init__   s    44U;**51%
 & 3 3E :!'!4!4UQY!?,-1G0HJYZN0[[./*<\;O 	ELLL,VClCD	E]]5)
r4   c                 8    | j                   D ]
  } ||      } |S r8   )re   )r1   hidden_stateblocks      r3   r9   zTextNetStage.forward   s%    ZZ 	/E .L	/r4   )r:   r;   r<   r   rY   r    r9   r>   r?   s   @r3   r[   r[      s    *} *S *"r4   r[   c            	       b     e Zd Zdef fdZ	 	 ddej                  dee   dee   de	fdZ
 xZS )	TextNetEncoderr   c                     t         |           g }t        |j                        }t	        |      D ]  }|j                  t        ||              t        j                  |      | _	        y r8   )
r   r    r`   r^   rangerc   r[   r'   rd   stages)r1   r   rq   
num_stagesstage_ixr2   s        r3   r    zTextNetEncoder.__init__   s\    778
j) 	:HMM,vx89	: mmF+r4   rk   output_hidden_statesreturn_dictr6   c                     |g}| j                   D ]  } ||      }|j                  |        |s|f}|r||fz   S |S t        ||      S )N)last_hidden_stater5   )rq   rc   r   )r1   rk   rt   ru   r5   re   outputs          r3   r9   zTextNetEncoder.forward   se     &[[ 	/E .L  .	/ "_F0D6],,P&P-\ijjr4   rG   )r:   r;   r<   r   r    r=   r   r   boolr   r9   r>   r?   s   @r3   rn   rn      sS    ,} , 04&*	kllk 'tnk d^	k
 
(kr4   rn   c                   &    e Zd ZU eed<   dZdZd Zy)TextNetPreTrainedModelr   textnetpixel_valuesc                    t        |t        j                  t        j                  f      rm|j                  j
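

# Shape sketch (illustrative, assuming the "czczup/textnet-base" configuration
# with hidden_sizes = [64, 64, 128, 256, 512] and a stride-2 first layer per
# stage): a (1, 3, 224, 224) image gives a (1, 64, 112, 112) stem output, and
# the four stages then yield hidden states of shapes (1, 64, 56, 56),
# (1, 128, 28, 28), (1, 256, 14, 14) and (1, 512, 7, 7).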
                  j                  d| j                  j                         |j                  %|j                  j
                  j                          y y t        |t        j                        rW|j                  j
                  j                  d       |j                  %|j                  j
                  j                          y y y )Ng        )meanstdg      ?)r%   r'   Linearr(   weightdatanormal_r   initializer_ranger   zero_r,   fill_)r1   modules     r3   _init_weightsz$TextNetPreTrainedModel._init_weights   s    fryy"))45MM&&CT[[5R5R&S{{&  &&( '/MM$$S){{&  &&( ' 0r4   N)r:   r;   r<   r   __annotations__base_model_prefixmain_input_namer    r4   r3   r{   r{      s    !$O)r4   r{   c                   r     e Zd Z fdZe	 ddedee   dee   dee	e
ee
   f   e	e
   ef   fd       Z xZS )TextNetModelc                     t         |   |       t        |      | _        t	        |      | _        t        j                  d      | _        | j                          y )N)r   r   )
r   r    r   stemrn   encoderr'   AdaptiveAvgPool2dpooler	post_initr1   r   r2   s     r3   r    zTextNetModel.__init__   sD     $V,	%f-**62r4   r}   rt   ru   r6   c                 :   ||n| j                   j                  }||n| j                   j                  }| j                  |      }| j	                  |||      }|d   }| j                  |      }|s||f}|r	||d   fz   S |S t        |||r
|d         S d       S )Nrt   ru   r   r   )rw   pooler_outputr5   )r   use_return_dictrt   r   r   r   r   )	r1   r}   rt   ru   rk   encoder_outputsrw   pooled_outputrx   s	            r3   r9   zTextNetModel.forward   s     &1%<k$++B]B]$8$D $++JjJj 	 yy.,,/CQ\ ' 
 ,A.$56'7F5I6_Q/11UvU7/'0D/!,
 	
 KO
 	
r4   rG   )r:   r;   r<   r    r   r   r   ry   r   r&   r   listr   r9   r>   r?   s   @r3   r   r      sg     os
"
:B4.
^fgk^l
	uS$s)^$eCj2ZZ	[
 
r4   r   z
    TextNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """
)
class TextNetForImageClassification(TextNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.textnet = TextNetModel(config)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.flatten = nn.Flatten()
        self.fc = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()

        self.classifier = nn.ModuleList([self.avg_pool, self.flatten])

        # initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:
        ```python
        >>> import torch
        >>> import requests
        >>> from transformers import TextNetForImageClassification, TextNetImageProcessor
        >>> from PIL import Image

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = TextNetImageProcessor.from_pretrained("czczup/textnet-base")
        >>> model = TextNetForImageClassification.from_pretrained("czczup/textnet-base")

        >>> inputs = processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> outputs.logits.shape
        torch.Size([1, 2])
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.textnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        last_hidden_state = outputs[0]
        for layer in self.classifier:
            last_hidden_state = layer(last_hidden_state)

        logits = self.fc(last_hidden_state)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@auto_docstring(
    custom_intro="""
    TextNet backbone, to be used with frameworks like DETR and MaskFormer.
    c                   `     e Zd Z fdZe	 ddedee   dee   dee	e	   e
f   fd       Z xZS )TextNetBackbonec                     t         |   |       t         | 	  |       t        |      | _        |j
                  | _        | j                          y r8   )r   r    _init_backboner   r|   ra   rE   r   r   s     r3   r    zTextNetBackbone.__init__s  sC     v&#F+"// 	r4   r}   rt   ru   r6   c                    ||n| j                   j                  }||n| j                   j                  }| j                  |d|      }|r|j                  n|d   }d}t        | j                        D ]  \  }}|| j                  v s|||   fz  } |s |f}	|r|r|j                  n|d   }|	|fz  }	|	S t        ||r|j                  d      S dd      S )a  
        Examples:

        ```python
        >>> import torch
        >>> import requests
        >>> from PIL import Image
        >>> from transformers import AutoImageProcessor, AutoBackbone

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("czczup/textnet-base")
        >>> model = AutoBackbone.from_pretrained("czczup/textnet-base")

        >>> inputs = processor(image, return_tensors="pt")
        >>> with torch.no_grad():
        >>>     outputs = model(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs = self.textnet(pixel_values, output_hidden_states=True, return_dict=return_dict)

        hidden_states = outputs.hidden_states if return_dict else outputs[2]

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                hidden_states = outputs.hidden_states if return_dict else outputs[2]
                output += (hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )


__all__ = ["TextNetBackbone", "TextNetModel", "TextNetPreTrainedModel", "TextNetForImageClassification"]
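

# Usage sketch for the bare TextNetModel (illustrative; mirrors the doctest
# examples above and assumes a 224x224 input). Kept as a comment so importing
# this module stays side-effect free:
#
#     model = TextNetModel.from_pretrained("czczup/textnet-base")
#     pixel_values = torch.rand(1, 3, 224, 224)
#     with torch.no_grad():
#         outputs = model(pixel_values)
#     outputs.last_hidden_state  # feature map from the final stage
#     outputs.pooler_output      # pooled to spatial size (2, 2) by the AdaptiveAvgPool2d pooler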