
"""PyTorch LUKE model."""

import math
from dataclasses import dataclass
from typing import Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN, gelu
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_luke import LukeConfig


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of the LUKE model.
    """
)
class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling):
    r"""
    pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
        Last layer hidden-state of the first token of the sequence (classification token) further processed by a
        Linear layer and a Tanh activation function.
    entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
        Sequence of entity hidden-states at the output of the last layer of the model.
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    Nentity_last_hidden_state.entity_hidden_states__name__
__module____qualname____doc__r   r   torchFloatTensor__annotations__r   tuple     y/var/www/html/ai-insurance-compliance-backend/venv/lib/python3.12/site-packages/transformers/models/luke/modeling_luke.pyr   r   &   s@    
 =Ahu'8'89@DH(5):):C)?#@AHr&   r   zV
    Base class for model's outputs, with potential hidden states and attentions.
    """
)
class BaseLukeModelOutput(BaseModelOutput):
    r"""
    entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
        Sequence of entity hidden-states at the output of the last layer of the model.
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    entity_last_hidden_state: Optional[torch.FloatTensor] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for model's outputs, with potential hidden states and attentions.
    """
)
class LukeMaskedLMOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        The sum of masked language modeling (MLM) loss and entity prediction loss.
    mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Masked language modeling (MLM) loss.
    mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Masked entity prediction (MEP) loss.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    entity_logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.entity_vocab_size)`):
        Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    Nlossmlm_lossmep_losslogitsentity_logitshidden_states.r   
attentions)r   r   r   r    r,   r   r!   r"   r#   r-   r.   r/   r0   r1   r$   r   r2   r%   r&   r'   r+   r+   Q   s    " )-D(5$$
%,,0Hhu(()0,0Hhu(()0*.FHU&&'.15M8E--.58<M8E%"3"345<DH(5):):C)?#@AH:>Ju00#567>r&   r+   z2
    Outputs of entity classification models.
    """
)
class EntityClassificationOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Outputs of entity pair classification models.
    """
)
class EntityPairClassificationOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Outputs of entity span classification models.
    """
)
class EntitySpanClassificationOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`):
        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Outputs of sentence classification models.
    """
)
class LukeSequenceClassifierOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification (or regression if config.num_labels==1) loss.
    logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
        Classification (or regression if config.num_labels==1) scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of token classification models.
    """
)
class LukeTokenClassifierOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Outputs of question answering models.
    """
)
class LukeQuestionAnsweringModelOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    start_logits: Optional[torch.FloatTensor] = None
    end_logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Outputs of multiple choice models.
    """
)
class LukeMultipleChoiceModelOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
        Classification loss.
    logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
        *num_choices* is the second dimension of the input tensors. (see *input_ids* above).

        Classification scores (before SoftMax).
    entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
        shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
        layer plus the initial entity embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


class LukeEmbeddings(nn.Module):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
    ):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)
& B=r&   rG   c                   ~     e Zd Zdef fdZ	 ddej                  dej                  deej                     fdZ xZ	S )LukeEntityEmbeddingsr_   c                    t         |           || _        t        j                  |j
                  |j                  d      | _        |j                  |j                  k7  r1t        j                  |j                  |j                  d      | _
        t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _        t        j                  |j                  |j                         | _        t        j"                  |j$                        | _        y )Nr   rI   FbiasrK   )rM   rN   r_   r   rO   entity_vocab_sizeentity_emb_sizeentity_embeddingsrQ   Linearentity_embedding_denserT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   s     r'   rN   zLukeEntityEmbeddings.__init__m  s    !#f.F.FH^H^lm!n!!V%7%77*,))F4J4JFL^L^ej*kD'#%<<0N0NPVPbPb#c %'\\&2H2H&J\J\%]"f&8&8f>S>STzz&"<"<=r&   
entity_idsrl   rn   c                 R   |t        j                  |      }| j                  |      }| j                  j                  | j                  j
                  k7  r| j                  |      }| j                  |j                  d            }|dk7  j                  |      j                  d      }||z  }t        j                  |d      }||j                  d      j                  d      z  }| j                  |      }||z   |z   }| j                  |      }| j                  |      }|S )Nr   )minrb   dimgHz>)r!   
zeros_liker   r_   r   rQ   r   rU   clamptype_asru   sumrW   rX   r\   )	r^   r   rl   rn   r   rU   position_embedding_maskrW   rq   s	            r'   rr   zLukeEntityEmbeddings.forward{  s'    !"--j9N 22:>;;&&$++*A*AA $ ; ;<M N"66|7I7Ia7I7PQ#/2#5">">?R"S"]"]^`"a14KK#ii(;D14K4O4OTV4O4W4]4]bf4]4gg $ : :> J&)<<?TT
^^J/
\\*-
r&   N)
r   r   r   r   rN   r!   
LongTensorr   rr   rx   ry   s   @r'   r{   r{   l  sL    >z >$ 6:	$$ && !!1!12	r&   r{   c                   2     e Zd Z fdZd Z	 	 	 ddZ xZS )LukeSelfAttentionc                     t         |           |j                  |j                  z  dk7  r2t	        |d      s&t        d|j                   d|j                   d      |j                  | _        t        |j                  |j                  z        | _        | j                  | j                  z  | _        |j                  | _	        t        j                  |j                  | j                        | _        t        j                  |j                  | j                        | _        t        j                  |j                  | j                        | _        | j                  rt        j                  |j                  | j                        | _        t        j                  |j                  | j                        | _        t        j                  |j                  | j                        | _        t        j$                  |j&                        | _        y )Nr   embedding_sizezThe hidden size z4 is not a multiple of the number of attention heads .)rM   rN   rQ   num_attention_headshasattr
ValueErrorintattention_head_sizeall_head_sizeuse_entity_aware_attentionr   r   querykeyvalue	w2e_query	e2w_query	e2e_queryrZ   attention_probs_dropout_probr\   r]   s     r'   rN   zLukeSelfAttention.__init__  s    : ::a?PVXhHi"6#5#5"6 7334A7 
 $*#=#= #&v'9'9F<V<V'V#W !558P8PP*0*K*K'YYv1143E3EF
99V//1C1CDYYv1143E3EF
**YYv'9'94;M;MNDNYYv'9'94;M;MNDNYYv'9'94;M;MNDNzz&"E"EFr&   c                     |j                         d d | j                  | j                  fz   } |j                  | }|j	                  dddd      S )Nrb   r      r   r
   )ri   r   r   viewpermute)r^   xnew_x_shapes      r'   transpose_for_scoresz&LukeSelfAttention.transpose_for_scores  sN    ffhsmt'?'?AYAY&ZZAFFK yyAq!$$r&   c                    |j                  d      }||}nt        j                  ||gd      }| j                  | j	                  |            }| j                  | j                  |            }	| j                  r|| j                  | j                  |            }
| j                  | j                  |            }| j                  | j                  |            }| j                  | j                  |            }|d d d d d |d d f   }|d d d d d |d d f   }|d d d d |d d d f   }|d d d d |d d d f   }t        j                  |
|j                  dd            }t        j                  ||j                  dd            }t        j                  ||j                  dd            }t        j                  ||j                  dd            }t        j                  ||gd      }t        j                  ||gd      }t        j                  ||gd      }nF| j                  | j                  |            }t        j                  ||j                  dd            }|t        j                  | j                        z  }|||z   }t         j"                  j%                  |d      }| j'                  |      }|||z  }t        j                  ||	      }|j)                  dddd      j+                         }|j                         d d | j,                  fz   } |j.                  | }|d d d |d d f   }|d }n|d d |d d d f   }|r|||f}|S ||f}|S )Nr   r   rb   r   r
   r   r   )ri   r!   catr   r   r   r   r   r   r   r   matmul	transposemathsqrtr   r   
functionalsoftmaxr\   r   
contiguousr   r   ) r^   word_hidden_statesr   attention_mask	head_maskoutput_attentions	word_sizeconcat_hidden_states	key_layervalue_layerw2w_query_layerw2e_query_layere2w_query_layere2e_query_layerw2w_key_layere2w_key_layerw2e_key_layere2e_key_layerw2w_attention_scoresw2e_attention_scorese2w_attention_scorese2e_attention_scoresword_attention_scoresentity_attention_scoresattention_scoresquery_layerattention_probscontext_layernew_context_layer_shapeoutput_word_hidden_statesoutput_entity_hidden_statesoutputss                                    r'   rr   zLukeSelfAttention.forward  sx    '++A.	'#5 #(99.@BV-W]^#_ --dhh7K.LM	//

;O0PQ**/C/O #77

CU8VWO"77GY8Z[O"77G[8\]O"77G[8\]O &aJYJ&9:M%aJYJ&9:M%aIJ&9:M%aIJ&9:M $)<<AXAXY[]_A`#a #(<<AXAXY[]_A`#a #(<<AXAXY[]_A`#a #(<<AXAXY[]_A`#a  %*II/CEY.Z`a$b!&+ii1EG[0\bc&d#$yy*?AX)Y_`a 33DJJ?S4TUK$||K9L9LRQS9TU+dii8P8P.QQ%/.@ --//0@b/I ,,7  -	9O_kB%--aAq9DDF"/"4"4"6s";t?Q?Q>S"S***,CD$1!ZiZ2B$C!'*.'*79:q8H*I'02M_G  12MNGr&   NNF)r   r   r   rN   r   rr   rx   ry   s   @r'   r   r     s    G0% Pr&   r   c                   n     e Zd Z fdZdej
                  dej
                  dej
                  fdZ xZS )LukeSelfOutputc                 (   t         |           t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _        t        j                  |j                        | _
        y NrK   )rM   rN   r   r   rQ   denserX   rY   rZ   r[   r\   r]   s     r'   rN   zLukeSelfOutput.__init__
  s`    YYv1163E3EF
f&8&8f>S>STzz&"<"<=r&   r1   input_tensorreturnc                 r    | j                  |      }| j                  |      }| j                  ||z         }|S r   r   r\   rX   r^   r1   r   s      r'   rr   zLukeSelfOutput.forward  7    

=1]3}|'CDr&   r   r   r   rN   r!   Tensorrr   rx   ry   s   @r'   r   r   	  1    >U\\  RWR^R^ r&   r   c                   2     e Zd Z fdZd Z	 	 	 ddZ xZS )LukeAttentionc                     t         |           t        |      | _        t	        |      | _        t               | _        y r   )rM   rN   r   r^   r   outputsetpruned_headsr]   s     r'   rN   zLukeAttention.__init__  s0    %f-	$V,Er&   c                     t        d      Nz4LUKE does not support the pruning of attention headsNotImplementedError)r^   headss     r'   prune_headszLukeAttention.prune_heads      !"XYYr&   c                 F   |j                  d      }| j                  |||||      }||d   }|}	n3t        j                  |d d d      }t        j                  ||gd      }	| j	                  ||	      }
|
d d d |d d f   }|d }n|
d d |d d d f   }||f|dd  z   }|S )Nr   r   r   r   )ri   r^   r!   r   r   )r^   r   r   r   r   r   r   self_outputsconcat_self_outputsr   attention_outputword_attention_outputentity_attention_outputr   s                 r'   rr   zLukeAttention.forward!  s     '++A.	yy 
  '".q/#5 "'))L!,<!"D#(99.@BV-W]^#_ ;;':<PQ 0JYJ1A B'&*#&6q)*a7G&H# )*AB\RSRTEUUr&   r   )r   r   r   rN   r   rr   rx   ry   s   @r'   r   r     s    "Z "r&   r   c                   V     e Zd Z fdZdej
                  dej
                  fdZ xZS )LukeIntermediatec                    t         |           t        j                  |j                  |j
                        | _        t        |j                  t              rt        |j                     | _        y |j                  | _        y r   )rM   rN   r   r   rQ   intermediate_sizer   
isinstance
hidden_actstrr   intermediate_act_fnr]   s     r'   rN   zLukeIntermediate.__init__H  s]    YYv1163K3KL
f''-'-f.?.?'@D$'-'8'8D$r&   r1   r   c                 J    | j                  |      }| j                  |      }|S r   )r   r   r^   r1   s     r'   rr   zLukeIntermediate.forwardP  s&    

=100?r&   r   ry   s   @r'   r   r   G  s#    9U\\ ell r&   r   c                   n     e Zd Z fdZdej
                  dej
                  dej
                  fdZ xZS )
LukeOutputc                 (   t         |           t        j                  |j                  |j
                        | _        t        j                  |j
                  |j                        | _        t        j                  |j                        | _        y r   )rM   rN   r   r   r   rQ   r   rX   rY   rZ   r[   r\   r]   s     r'   rN   zLukeOutput.__init__X  s`    YYv779K9KL
f&8&8f>S>STzz&"<"<=r&   r1   r   r   c                 r    | j                  |      }| j                  |      }| j                  ||z         }|S r   r   r   s      r'   rr   zLukeOutput.forward^  r   r&   r   ry   s   @r'   r   r   W  r   r&   r   c                   2     e Zd Z fdZ	 	 	 ddZd Z xZS )	LukeLayerc                     t         |           |j                  | _        d| _        t	        |      | _        t        |      | _        t        |      | _	        y Nr   )
rM   rN   chunk_size_feed_forwardseq_len_dimr   	attentionr   intermediater   r   r]   s     r'   rN   zLukeLayer.__init__f  sI    '-'E'E$&v.,V4 (r&   c                 J   |j                  d      }| j                  |||||      }||d   }nt        j                  |d d d      }|dd  }	t	        | j
                  | j                  | j                  |      }
|
d d d |d d f   }|d }n|
d d |d d d f   }||f|	z   }	|	S )Nr   )r   r   r   r   )ri   r  r!   r   r   feed_forward_chunkr  r  )r^   r   r   r   r   r   r   self_attention_outputsconcat_attention_outputr   layer_outputword_layer_outputentity_layer_outputs                r'   rr   zLukeLayer.forwardn  s     '++A.	!% / "0 "
  '&<Q&?#&+ii0Fr0JPQ&R#(,0##T%A%A4CSCSUl
 )JYJ)9:'"&".q)*a/?"@$&9:WDr&   c                 L    | j                  |      }| j                  ||      }|S r   )r	  r   )r^   r   intermediate_outputr  s       r'   r  zLukeLayer.feed_forward_chunk  s,    "//0@A{{#68HIr&   r   )r   r   r   rN   rr   r  rx   ry   s   @r'   r  r  e  s    ) #Jr&   r  c                   0     e Zd Z fdZ	 	 	 	 	 ddZ xZS )LukeEncoderc                     t         |           || _        t        j                  t        |j                        D cg c]  }t        |       c}      | _        d| _	        y c c}w )NF)
rM   rN   r_   r   
ModuleListrangenum_hidden_layersr  layergradient_checkpointing)r^   r_   _r`   s      r'   rN   zLukeEncoder.__init__  sN    ]]uVE]E]?^#_!If$5#_`
&+# $`s   A#c                 D   |rdnd }|rdnd }	|rdnd }
t        | j                        D ]@  \  }}|r||fz   }|	|fz   }	|||   nd } ||||||      }|d   }||d   }|s8|
|d   fz   }
B |r||fz   }|	|fz   }	|st        d |||
||	fD              S t        |||
||	      S )Nr%   r   r   r   c              3   $   K   | ]  }|| 
 y wr   r%   .0vs     r'   	<genexpr>z&LukeEncoder.forward.<locals>.<genexpr>        
 = 
   )last_hidden_stater1   r2   r   r   )	enumerater  r$   r)   )r^   r   r   r   r   r   output_hidden_statesreturn_dictall_word_hidden_statesall_entity_hidden_statesall_self_attentionsilayer_modulelayer_head_masklayer_outputss                  r'   rr   zLukeEncoder.forward  s/    (<)=24 $5b4(4 	POA|#)?CUBW)W&+CG[F]+](.7.CilO("$!M "/q!1#/'4Q'7$ &9]1=M<O&O#)	P,  %;?Q>S%S"'?CWBY'Y$ 
 '*'(,
 
 
 #00*%9!9
 	
r&   )NNFFTr   r   r   rN   rr   rx   ry   s   @r'   r  r    s    , ":
r&   r  c                   V     e Zd Z fdZdej
                  dej
                  fdZ xZS )
LukePoolerc                     t         |           t        j                  |j                  |j                        | _        t        j                         | _        y r   )rM   rN   r   r   rQ   r   Tanh
activationr]   s     r'   rN   zLukePooler.__init__  s9    YYv1163E3EF
'')r&   r1   r   c                 \    |d d df   }| j                  |      }| j                  |      }|S )Nr   )r   r4  )r^   r1   first_token_tensorpooled_outputs       r'   rr   zLukePooler.forward  s6     +1a40

#566r&   r   ry   s   @r'   r1  r1    s#    $
U\\ ell r&   r1  c                   $     e Zd Z fdZd Z xZS )EntityPredictionHeadTransformc                 h   t         |           t        j                  |j                  |j
                        | _        t        |j                  t              rt        |j                     | _        n|j                  | _        t        j                  |j
                  |j                        | _        y r   )rM   rN   r   r   rQ   r   r   r   r   r   r   transform_act_fnrX   rY   r]   s     r'   rN   z&EntityPredictionHeadTransform.__init__  s{    YYv1163I3IJ
f''-$*6+<+<$=D!$*$5$5D!f&<&<&BWBWXr&   c                 l    | j                  |      }| j                  |      }| j                  |      }|S r   )r   r;  rX   r   s     r'   rr   z%EntityPredictionHeadTransform.forward  s4    

=1--m<}5r&   r/  ry   s   @r'   r9  r9    s    Yr&   r9  c                   $     e Zd Z fdZd Z xZS )EntityPredictionHeadc                     t         |           || _        t        |      | _        t        j                  |j                  |j                  d      | _	        t        j                  t        j                  |j                              | _        y )NFr}   )rM   rN   r_   r9  	transformr   r   r   r   decoder	Parameterr!   rj   r~   r]   s     r'   rN   zEntityPredictionHead.__init__  sa    6v>yy!7!79Q9QX]^LLV-E-E!FG	r&   c                 d    | j                  |      }| j                  |      | j                  z   }|S r   )r@  rA  r~   r   s     r'   rr   zEntityPredictionHead.forward  s-    }5]3dii?r&   r/  ry   s   @r'   r>  r>    s    Hr&   r>  c                   H    e Zd ZU eed<   dZdZddgZdej                  fdZ
y)	LukePreTrainedModelr_   lukeTr   r{   modulec                 j   t        |t        j                        rm|j                  j                  j                  d| j                  j                         |j                  %|j                  j                  j                          yyt        |t        j                        r|j                  dk(  r%|j                  j                  j                          n;|j                  j                  j                  d| j                  j                         |j                  2|j                  j                  |j                     j                          yyt        |t        j                        rJ|j                  j                  j                          |j                  j                  j                  d       yy)zInitialize the weightsg        )meanstdNr         ?)r   r   r   weightdatanormal_r_   initializer_ranger~   zero_rO   embedding_dimrJ   rX   fill_)r^   rG  s     r'   _init_weightsz!LukePreTrainedModel._init_weights  s(   fbii(MM&&CT[[5R5R&S{{&  &&( '-##q(""((*""**9V9V*W!!-""6#5#56<<> .-KK""$MM$$S) .r&   N)r   r   r   r   r#   base_model_prefixsupports_gradient_checkpointing_no_split_modulesr   ModulerS  r%   r&   r'   rE  rE    s0    &*#(*@A*BII *r&   rE  zt
    The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any
    c                        e Zd Zddedef fdZd Zd Zd Zd Z	d Z
e	 	 	 	 	 	 	 	 	 	 	 	 	 dd	eej                     d
eej                     deej                     deej                     deej                     deej                     deej                     deej                     deej                     deej                     dee   dee   dee   deeef   fd       Zdej                  deej                     fdZ xZS )	LukeModelr_   add_pooling_layerc                     t         |   |       || _        t        |      | _        t        |      | _        t        |      | _        |rt        |      nd| _
        | j                          y)zv
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        N)rM   rN   r_   rG   rq   r{   r   r  encoderr1  pooler	post_init)r^   r_   rZ  r`   s      r'   rN   zLukeModel.__init__,  sZ    
 	 (0!5f!="6*,=j(4 	r&   c                 .    | j                   j                  S r   rq   rS   r^   s    r'   get_input_embeddingszLukeModel.get_input_embeddings=  s    ...r&   c                 &    || j                   _        y r   r`  r^   r   s     r'   set_input_embeddingszLukeModel.set_input_embeddings@  s    */'r&   c                 .    | j                   j                   S r   r   ra  s    r'   get_entity_embeddingszLukeModel.get_entity_embeddingsC  s    %%777r&   c                 &    || j                   _         y r   rg  rd  s     r'   set_entity_embeddingszLukeModel.set_entity_embeddingsF  s    380r&   c                     t        d      r   r   )r^   heads_to_prunes     r'   _prune_headszLukeModel._prune_headsI  r   r&   rm   r   rn   rl   r   entity_attention_maskentity_token_type_idsentity_position_idsr   ro   r   r&  r'  r   c           	         ||n| j                   j                  }||n| j                   j                  }||n| j                   j                  }||
t	        d      |#| j                  ||       |j                         }n!|
|
j                         dd }nt	        d      |\  }}||j                  n|
j                  }|t        j                  ||f|      }|&t        j                  |t        j                  |      }|V|j                  d      }|t        j                  ||f|      }|(t        j                  ||ft        j                  |      }| j                  |	| j                   j                        }	| j                  ||||
      }| j                  ||      }|d}n| j!                  |||      }| j#                  ||||	|||	      }|d
   }| j$                  | j%                  |      nd}|s
||f|dd z   S t'        |||j(                  |j*                  |j,                  |j.                        S )uz  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeModel

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
        >>> model = LukeModel.from_pretrained("studio-ousia/luke-base")
        # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé"

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [(0, 7)]  # character-based entity span corresponding to "Beyoncé"

        >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
        >>> outputs = model(**encoding)
        >>> word_last_hidden_state = outputs.last_hidden_state
        >>> entity_last_hidden_state = outputs.entity_last_hidden_state
        # Input Wikipedia entities to obtain enriched contextualized representations of word tokens

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entities = [
        ...     "Beyoncé",
        ...     "Los Angeles",
        ... ]  # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
        >>> entity_spans = [
        ...     (0, 7),
        ...     (17, 28),
        ... ]  # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"

        >>> encoding = tokenizer(
        ...     text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt"
        ... )
        >>> outputs = model(**encoding)
        >>> word_last_hidden_state = outputs.last_hidden_state
        >>> entity_last_hidden_state = outputs.entity_last_hidden_state
        ```NzDYou cannot specify both input_ids and inputs_embeds at the same timerb   z5You have to specify either input_ids or inputs_embeds)re   rc   r   )rm   rl   rn   ro   )r   r   r   r&  r'  r   )r$  pooler_outputr1   r2   r   r   )r_   r   r&  use_return_dictr   %warn_if_padding_and_no_attention_maskri   re   r!   onesrj   rk   get_head_maskr  rq   get_extended_attention_maskr   r\  r]  r   r1   r2   r   r   )r^   rm   r   rn   rl   r   rn  ro  rp  r   ro   r   r&  r'  rp   
batch_size
seq_lengthre   entity_seq_lengthword_embedding_outputextended_attention_maskentity_embedding_outputencoder_outputssequence_outputr7  s                            r'   rr   zLukeModel.forwardL  s   R 2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B] ]%>cdd"66y.Q#..*K&',,.s3KTUU!,
J%.%:!!@T@T!"ZZZ(@PN!"[[EJJvVN! * 2$,(-

J@Q3R[a(b%$,(-ZAR4S[`[e[ent(u% &&y$++2O2OP	 !%%)'	 !0 !
 #'"B"B>Sh"i &*#&*&<&<ZI\^s&t# ,,!#2/!5# ' 
 *!, 9=8OO4UY#]3oab6III--')77&11%4%M%M!0!E!E
 	
r&   word_attention_maskc                    |}|t        j                  ||gd      }|j                         dk(  r|dddddddf   }n:|j                         dk(  r|ddddddf   }nt        d|j                   d      |j                  | j                        }d	|z
  t        j                  | j                        j                  z  }|S )
ac  
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            word_attention_mask (`torch.LongTensor`):
                Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
            entity_attention_mask (`torch.LongTensor`, *optional*):
                Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.

        Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
        Nrb   r   r
   r   z&Wrong shape for attention_mask (shape ))rd   rK  )	r!   r   r   r   shaperg   rd   finfor   )r^   r  rn  r   r|  s        r'   rw  z%LukeModel.get_extended_attention_mask  s     - ,"YY8M'NTVWN1$&4Qa]&C#!Q&&4QdA5E&F#EnFZFZE[[\]^^"9"<"<4::"<"N#&)@#@EKKPTPZPZD[D_D_"_&&r&   )T)NNNNNNNNNNNNN)r   r   r   r   boolrN   rb  re  rh  rj  rm  r   r   r!   r   r"   r   r$   r   rr   rw  rx   ry   s   @r'   rY  rY  &  s   z d "/089Z  156:593715=A<@:>1559,0/3&*Y
E,,-Y
 !!2!23Y
 !!1!12	Y

 u//0Y
 U--.Y
  ((9(9:Y
  ((8(89Y
 &e&6&67Y
 E--.Y
   1 12Y
 $D>Y
 'tnY
 d^Y
 
u44	5Y
 Y
v'#(#3#3'LTUZUeUeLf'r&   rY  c                     | j                  |      j                         }t        j                  |d      j	                  |      |z  }|j                         |z   S )a  
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: torch.Tensor
        padding_idx: int

    Returns: torch.Tensor
    r   r   )ner   r!   cumsumr   rk   )rm   rJ   maskincremental_indicess       r'   rf   rf     sP     <<$((*D <<!4<<TBdJ##%33r&   c                   .     e Zd ZdZ fdZd Zd Z xZS )
LukeLMHeadz*Roberta Head for masked language modeling.c                    t         |           t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _
        t        j                  t        j                  |j                              | _        | j                  | j                  _        y r   )rM   rN   r   r   rQ   r   rX   rY   
layer_normrP   rA  rB  r!   rj   r~   r]   s     r'   rN   zLukeLMHead.__init__  s    YYv1163E3EF
,,v'9'9v?T?TUyy!3!3V5F5FGLLV->->!?@	 IIr&   c                     | j                  |      }t        |      }| j                  |      }| j                  |      }|S r   )r   r   r  rA  )r^   featureskwargsr   s       r'   rr   zLukeLMHead.forward$  s;    JJx GOOA LLOr&   c                     | j                   j                  j                  j                  dk(  r| j                  | j                   _        y | j                   j                  | _        y )Nmeta)rA  r~   re   typera  s    r'   _tie_weightszLukeLMHead._tie_weights.  sC     <<##((F2 $		DLL))DIr&   )r   r   r   r    rN   rr   r  rx   ry   s   @r'   r  r    s    4&*r&   r  z
    The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and
    masked entity prediction.
    c            $           e Zd Zg dZ fdZ fdZd Zd Ze	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dde	e
j                     de	e
j                     de	e
j                     d	e	e
j                     d
e	e
j                     de	e
j                     de	e
j                     de	e
j                     de	e
j                     de	e
j                     de	e
j                     de	e
j                     de	e   de	e   de	e   deeef   f d       Z xZS )LukeForMaskedLM)zlm_head.decoder.weightzlm_head.decoder.biasz!entity_predictions.decoder.weightc                     t         |   |       t        |      | _        t	        |      | _        t        |      | _        t        j                         | _
        | j                          y r   )rM   rN   rY  rF  r  lm_headr>  entity_predictionsr   r   loss_fnr^  r]   s     r'   rN   zLukeForMaskedLM.__init__@  sQ     f%	!&)"6v">**, 	r&   c                     t         |           | j                  | j                  j                  | j
                  j                  j                         y r   )rM   tie_weights_tie_or_clone_weightsr  rA  rF  r   )r^   r`   s    r'   r  zLukeForMaskedLM.tie_weightsM  s:    ""4#:#:#B#BDIID_D_DqDqrr&   c                 .    | j                   j                  S r   r  rA  ra  s    r'   get_output_embeddingsz%LukeForMaskedLM.get_output_embeddingsQ  s    ||###r&   c                 &    || j                   _        y r   r  )r^   new_embeddingss     r'   set_output_embeddingsz%LukeForMaskedLM.set_output_embeddingsT  s    -r&   rm   r   rn   rl   r   rn  ro  rp  labelsentity_labelsr   ro   r   r&  r'  r   c                 J   ||n| j                   j                  }| j                  ||||||||||||d      }d}d}| j                  |j                        }|	d|	j                  |j                        }	| j                  |j                  d| j                   j                        |	j                  d            }||}d}d}|j                  l| j                  |j                        }|
O| j                  |j                  d| j                   j                        |
j                  d            }||}n||z   }|s8t        d ||||||j                  |j                  |j                   fD              S t#        ||||||j                  |j                  |j                         S )aC  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        NTrm   r   rn   rl   r   rn  ro  rp  r   ro   r   r&  r'  rb   c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z*LukeForMaskedLM.forward.<locals>.<genexpr>  s       = r#  )r,   r-   r.   r/   r0   r1   r   r2   )r_   rs  rF  r  r$  rg   re   r  r   rP   r   r  r   r$   r1   r   r2   r+   )r^   rm   r   rn   rl   r   rn  ro  rp  r  r  r   ro   r   r&  r'  r   r,   r-   r/   r.   r0   s                         r'   rr   zLukeForMaskedLM.forwardW  s   b &1%<k$++B]B]))))%!"7"7 3'/!5  
  g778YYv}}-F||FKKDKK4J4J$KV[[Y[_]H|++7 33G4T4TUM(<<(:(:2t{{?\?\(]_l_q_qrt_uv<#D(?D  !))00&&	   "'!//!(!=!=))	
 		
r&   NNNNNNNNNNNNNNN)r   r   r   _tied_weights_keysrN   r  r  r  r   r   r!   r   r"   r  r   r$   r+   rr   rx   ry   s   @r'   r  r  7  s    qs$.  156:593715<@<@:>-1481559,0/3&*!q
E,,-q
 !!2!23q
 !!1!12	q

 u//0q
 U--.q
  ((8(89q
  ((8(89q
 &e&6&67q
 ))*q
   0 01q
 E--.q
   1 12q
 $D>q
 'tnq
  d^!q
" 
u((	)#q
 q
r&   r  z
    The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity
    token) for entity classification tasks, such as Open Entity.
    c            "           e Zd Z fdZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deej                     deej                     deej                     deej                     d	eej                     d
eej                     deej                     deej                     dee	   dee	   dee	   de
eef   fd       Z xZS )LukeForEntityClassificationc                 ,   t         |   |       t        |      | _        |j                  | _        t        j                  |j                        | _        t        j                  |j                  |j                        | _        | j                          y r   rM   rN   rY  rF  
num_labelsr   rZ   r[   r\   r   rQ   
classifierr^  r]   s     r'   rN   z$LukeForEntityClassification.__init__  si     f%	 ++zz&"<"<=))F$6$68I8IJ 	r&   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   c                    ||n| j                   j                  }| j                  |||||||||	|
||d      }|j                  dddddf   }| j	                  |      }| j                  |      }d}||j                  |j                        }|j                  dk(  r!t        j                  j                  ||      }nMt        j                  j                  |j                  d      |j                  d      j                  |            }|s5t        d |||j                   |j"                  |j$                  fD              S t'        |||j                   |j"                  |j$                        S )	u
  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
            used for the single-label classification. In this case, labels should contain the indices that should be in
            `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
            loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0
            and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntityClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
        >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [(0, 7)]  # character-based entity span corresponding to "Beyoncé"
        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: person
        ```NTr  r   r   rb   c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z6LukeForEntityClassification.forward.<locals>.<genexpr>?        = r#  r,   r/   r1   r   r2   )r_   rs  rF  r   r\   r  rg   re   ndimr   r   cross_entropy binary_cross_entropy_with_logitsr   r   r$   r1   r   r2   r4   r^   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   feature_vectorr/   r,   s                      r'   rr   z#LukeForEntityClassification.forward  sm   | &1%<k$++B]B]))))%!"7"7 3'/!5  
  !99!Q'Bn50 YYv}}-F{{a}}2266B}}EEfkkRToW]WbWbceWfWnWnouWvw (=(=w?[?[]d]o]op   *!//!(!=!=))
 	
r&   NNNNNNNNNNNNNN)r   r   r   rN   r   r   r!   r   r"   r  r   r$   r4   rr   rx   ry   s   @r'   r  r    s}   
  156:593715=A<@:>1559.2,0/3&*k
E,,-k
 !!2!23k
 !!1!12	k

 u//0k
 U--.k
  ((9(9:k
  ((8(89k
 &e&6&67k
 E--.k
   1 12k
 **+k
 $D>k
 'tnk
 d^k
  
u00	1!k
 k
r&   r  z
    The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity
    tokens) for entity pair classification tasks, such as TACRED.
    c            "           e Zd Z fdZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deej                     deej                     deej                     deej                     d	eej                     d
eej                     deej                     deej                     dee	   dee	   dee	   de
eef   fd       Z xZS )LukeForEntityPairClassificationc                 4   t         |   |       t        |      | _        |j                  | _        t        j                  |j                        | _        t        j                  |j                  dz  |j                  d      | _        | j                          y )Nr   Fr  r]   s     r'   rN   z(LukeForEntityPairClassification.__init__U  sp     f%	 ++zz&"<"<=))F$6$6$:F<M<MuU 	r&   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   c                 :   ||n| j                   j                  }| j                  |||||||||	|
||d      }t        j                  |j
                  dddddf   |j
                  dddddf   gd      }| j                  |      }| j                  |      }d}||j                  |j                        }|j                  dk(  r!t        j                  j                  ||      }nMt        j                  j                  |j                  d      |j                  d      j!                  |            }|s5t#        d |||j$                  |j&                  |j(                  fD              S t+        |||j$                  |j&                  |j(                  	      S )
u  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
            used for the single-label classification. In this case, labels should contain the indices that should be in
            `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
            loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0
            and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntityPairClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
        >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")

        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [
        ...     (0, 7),
        ...     (17, 28),
        ... ]  # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: per:cities_of_residence
        ```NTr  r   r   r   rb   c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z:LukeForEntityPairClassification.forward.<locals>.<genexpr>  r  r#  r  )r_   rs  rF  r!   r   r   r\   r  rg   re   r  r   r   r  r  r   r   r$   r1   r   r2   r9   r  s                      r'   rr   z'LukeForEntityPairClassification.forwarda  s   B &1%<k$++B]B]))))%!"7"7 3'/!5  
  --aAg68X8XYZ\]_`Y`8abhi
 n50 YYv}}-F{{a}}2266B}}EEfkkRToW]WbWbceWfWnWnouWvw (=(=w?[?[]d]o]op   .!//!(!=!=))
 	
r&   r  )r   r   r   rN   r   r   r!   r   r"   r  r   r$   r9   rr   rx   ry   s   @r'   r  r  N  s}   
  156:593715=A<@:>1559-1,0/3&*p
E,,-p
 !!2!23p
 !!1!12	p

 u//0p
 U--.p
  ((9(9:p
  ((8(89p
 &e&6&67p
 E--.p
   1 12p
 ))*p
 $D>p
 'tnp
 d^p
  
u44	5!p
 p
r&   r  z
    The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks
    such as named entity recognition.
    c            &           e Zd Z fdZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deej                     deej                     deej                     deej                     d	eej                     d
eej                     deej                     deej                     deej                     deej                     dee	   dee	   dee	   de
eef   f"d       Z xZS )LukeForEntitySpanClassificationc                 2   t         |   |       t        |      | _        |j                  | _        t        j                  |j                        | _        t        j                  |j                  dz  |j                        | _        | j                          y )Nr
   r  r]   s     r'   rN   z(LukeForEntitySpanClassification.__init__  sn     f%	 ++zz&"<"<=))F$6$6$:F<M<MN 	r&   rm   r   rn   rl   r   rn  ro  rp  entity_start_positionsentity_end_positionsr   ro   r  r   r&  r'  r   c                    ||n| j                   j                  }| j                  ||||||||||||d      }|j                  j	                  d      }|	j                  d      j                  dd|      }	|	j                  |j                  j                  k7  r%|	j                  |j                  j                        }	t        j                  |j                  d|	      }|
j                  d      j                  dd|      }
|
j                  |j                  j                  k7  r%|
j                  |j                  j                        }
t        j                  |j                  d|
      }t        j                  |||j                  gd      }| j                  |      }| j                  |      }d}||j                  |j                        }|j                  dk(  rJt         j"                  j%                  |j'                  d| j(                        |j'                  d            }nMt         j"                  j+                  |j'                  d      |j'                  d      j-                  |            }|s5t/        d |||j0                  |j2                  |j4                  fD              S t7        |||j0                  |j2                  |j4                  	      S )
u  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        entity_start_positions (`torch.LongTensor`):
            The start positions of entities in the word token sequence.
        entity_end_positions (`torch.LongTensor`):
            The end positions of entities in the word token sequence.
        labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):
            Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross
            entropy loss is used for the single-label classification. In this case, labels should contain the indices
            that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length,
            num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,
            labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, LukeForEntitySpanClassification

        >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
        >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")

        >>> text = "Beyoncé lives in Los Angeles"
        # List all possible entity spans in the text

        >>> word_start_positions = [0, 8, 14, 17, 21]  # character-based start positions of word tokens
        >>> word_end_positions = [7, 13, 16, 20, 28]  # character-based end positions of word tokens
        >>> entity_spans = []
        >>> for i, start_pos in enumerate(word_start_positions):
        ...     for end_pos in word_end_positions[i:]:
        ...         entity_spans.append((start_pos, end_pos))

        >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()
        >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):
        ...     if predicted_class_idx != 0:
        ...         print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])
        Beyoncé PER
        Los Angeles LOC
        ```NTr  rb   r   r   r   c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z:LukeForEntitySpanClassification.forward.<locals>.<genexpr>e  r  r#  r  )r_   rs  rF  r$  ri   ru   rv   re   rg   r!   gatherr   r   r\   r  r  r   r   r  r   r  r  r   r$   r1   r   r2   r;   )r^   rm   r   rn   rl   r   rn  ro  rp  r  r  r   ro   r  r   r&  r'  r   rQ   start_states
end_statesr  r/   r,   s                           r'   rr   z'LukeForEntitySpanClassification.forward  s   ^ &1%<k$++B]B]))))%!"7"7 3'/!5  
 //44R8!7!A!A"!E!L!LRQSU`!a!((G,E,E,L,LL%;%>%>w?X?X?_?_%`"||G$=$=rCYZ3==bAHHRQ\]&&'*C*C*J*JJ#7#:#:7;T;T;[;[#\ \\'";";RAUV
L*g>^>^#_efgn50YYv}}-F {{a}}226;;r4??3SU[U`U`acUde}}EEfkkRToW]WbWbceWfWnWnouWvw (=(=w?[?[]d]o]op   .!//!(!=!=))
 	
r&   )NNNNNNNNNNNNNNNN)r   r   r   rN   r   r   r!   r   r"   r  r   r$   r;   rr   rx   ry   s   @r'   r  r    s   
  156:593715<@<@:>=A;?1559-1,0/3&*#H
E,,-H
 !!2!23H
 !!1!12	H

 u//0H
 U--.H
  ((8(89H
  ((8(89H
 &e&6&67H
 !))9)9 :H
 'u'7'78H
 E--.H
   1 12H
 ))*H
 $D>H
  'tn!H
" d^#H
$ 
u44	5%H
 H
r&   r  z
    The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    c            "           e Zd Z fdZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deej                     deej                     deej                     deej                     d	eej                     d
eej                     deej                     deej                     dee	   dee	   dee	   de
eef   fd       Z xZS )LukeForSequenceClassificationc                 \   t         |   |       |j                  | _        t        |      | _        t        j                  |j                  |j                  n|j                        | _	        t        j                  |j                  |j                        | _        | j                          y r   rM   rN   r  rY  rF  r   rZ   classifier_dropoutr[   r\   r   rQ   r  r^  r]   s     r'   rN   z&LukeForSequenceClassification.__init__{  s      ++f%	zz)/)B)B)NF%%TZTnTn
 ))F$6$68I8IJ 	r&   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   c                    ||n| j                   j                  }| j                  |||||||||	|
||d      }|j                  }| j	                  |      }| j                  |      }d}||j                  |j                        }| j                   j                  | j                  dk(  rd| j                   _        nl| j                  dkD  rL|j                  t        j                  k(  s|j                  t        j                  k(  rd| j                   _        nd| j                   _        | j                   j                  dk(  rIt               }| j                  dk(  r& ||j                         |j                               }n |||      }n| j                   j                  dk(  r=t!               } ||j#                  d| j                        |j#                  d            }n,| j                   j                  dk(  rt%               } |||      }|s5t'        d	 |||j(                  |j*                  |j,                  fD              S t/        |||j(                  |j*                  |j,                  
      S )a  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        NTr  r   
regressionsingle_label_classificationmulti_label_classificationrb   c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z8LukeForSequenceClassification.forward.<locals>.<genexpr>  r  r#  r  )r_   rs  rF  rr  r\   r  rg   re   problem_typer  rd   r!   rk   r   r	   squeezer   r   r   r$   r1   r   r2   r=   )r^   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   r7  r/   r,   loss_fcts                       r'   rr   z%LukeForSequenceClassification.forward  s#   V &1%<k$++B]B]))))%!"7"7 3'/!5  
   --]3/YYv}}-F{{''/??a'/;DKK,__q(fllejj.HFLL\a\e\eLe/LDKK,/KDKK,{{''<7"9??a'#FNN$4fnn6FGD#FF3D))-JJ+-B @&++b/R))-II,./ (=(=w?[?[]d]o]op   ,!//!(!=!=))
 	
r&   r  )r   r   r   rN   r   r   r!   r   r"   r  r   r$   r=   rr   rx   ry   s   @r'   r  r  t  s}   
  156:593715=A<@:>1559.2,0/3&*g
E,,-g
 !!2!23g
 !!1!12	g

 u//0g
 U--.g
  ((9(9:g
  ((8(89g
 &e&6&67g
 E--.g
   1 12g
 **+g
 $D>g
 'tng
 d^g
  
u22	3!g
 g
r&   r  z
    The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To
    solve Named-Entity Recognition (NER) task using LUKE, `LukeForEntitySpanClassification` is more suitable than this
    class.
    c            "           e Zd Z fdZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deej                     deej                     deej                     deej                     d	eej                     d
eej                     deej                     deej                     dee	   dee	   dee	   de
eef   fd       Z xZS )LukeForTokenClassificationc                 `   t         |   |       |j                  | _        t        |d      | _        t        j                  |j                  |j                  n|j                        | _	        t        j                  |j                  |j                        | _        | j                          y NF)rZ  r  r]   s     r'   rN   z#LukeForTokenClassification.__init__  s      ++f>	zz)/)B)B)NF%%TZTnTn
 ))F$6$68I8IJ 	r&   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   c                 N   ||n| j                   j                  }| j                  |||||||||	|
||d      }|j                  }| j	                  |      }| j                  |      }d}|W|j                  |j                        }t               } ||j                  d| j                        |j                  d            }|s5t        d |||j                  |j                  |j                  fD              S t        |||j                  |j                  |j                        S )aM  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        NTr  rb   c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z5LukeForTokenClassification.forward.<locals>.<genexpr>Q  r  r#  r  )r_   rs  rF  r$  r\   r  rg   re   r   r   r  r$   r1   r   r2   r?   )r^   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   r  r/   r,   r  s                       r'   rr   z"LukeForTokenClassification.forward  s1   V &1%<k$++B]B]))))%!"7"7 3'/!5  
  "33,,71YYv}}-F')HFKKDOO<fkk"oND (=(=w?[?[]d]o]op   )!//!(!=!=))
 	
r&   r  )r   r   r   rN   r   r   r!   r   r"   r  r   r$   r?   rr   rx   ry   s   @r'   r  r    s}     156:593715=A<@:>1559.2,0/3&*U
E,,-U
 !!2!23U
 !!1!12	U

 u//0U
 U--.U
  ((9(9:U
  ((8(89U
 &e&6&67U
 E--.U
   1 12U
 **+U
 $D>U
 'tnU
 d^U
  
u//	0!U
 U
r&   r  c            $           e Zd Z fdZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deej                     deej                     deej                     deej                     d	eej                     d
eej                     deej                     deej                     deej                     dee	   dee	   dee	   de
eef   f d       Z xZS )LukeForQuestionAnsweringc                     t         |   |       |j                  | _        t        |d      | _        t        j                  |j                  |j                        | _        | j                          y r  )
rM   rN   r  rY  rF  r   r   rQ   
qa_outputsr^  r]   s     r'   rN   z!LukeForQuestionAnswering.__init__b  sU      ++f>	))F$6$68I8IJ 	r&   rm   r   rn   rl   r   rn  ro  rp  r   ro   start_positionsend_positionsr   r&  r'  r   c                 `   ||n| j                   j                  }| j                  |||||||||	|
||d      }|j                  }| j	                  |      }|j                  dd      \  }}|j                  d      }|j                  d      }d}||t        |j                               dkD  r|j                  d      }t        |j                               dkD  r|j                  d      }|j                  d      }|j                  d|       |j                  d|       t        |      } |||      } |||      }||z   d	z  }|s6t        d
 ||||j                  |j                  |j                  fD              S t        ||||j                  |j                  |j                        S )a  
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        NTr  r   rb   r   r   )ignore_indexr   c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z3LukeForQuestionAnswering.forward.<locals>.<genexpr>  s       = r#  )r,   rB   rC   r1   r   r2   )r_   rs  rF  r$  r  splitr  lenri   clamp_r   r$   r1   r   r2   rA   )r^   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r  r   r&  r'  r   r  r/   rB   rC   
total_lossignored_indexr  
start_lossend_losss                             r'   rr   z LukeForQuestionAnswering.forwardm  s   P &1%<k$++B]B]))))%!"7"7 3'/!5  
  "331#)<<r<#: j#++B/''+

&=+D?'')*Q."1"9"9""==%%'(1, - 5 5b 9(--a0M""1m4  M2']CH!,@J
M:H$x/14J   ))00&&   0%!!//!(!=!=))
 	
r&   r  )r   r   r   rN   r   r   r!   r   r"   r  r   r$   rA   rr   rx   ry   s   @r'   r  r  `  s   	  156:594815=A<@:>15596:48,0/3&*!f
E,,-f
 !!2!23f
 !!1!12	f

 u001f
 U--.f
  ((9(9:f
  ((8(89f
 &e&6&67f
 E--.f
   1 12f
 "%"2"23f
   0 01f
 $D>f
 'tnf
  d^!f
" 
u66	7#f
 f
r&   r  c            "           e Zd Z fdZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deej                     deej                     deej                     deej                     d	eej                     d
eej                     deej                     deej                     dee	   dee	   dee	   de
eef   fd       Z xZS )LukeForMultipleChoicec                 &   t         |   |       t        |      | _        t	        j
                  |j                  |j                  n|j                        | _        t	        j                  |j                  d      | _        | j                          y r  )rM   rN   rY  rF  r   rZ   r  r[   r\   r   rQ   r  r^  r]   s     r'   rN   zLukeForMultipleChoice.__init__  so     f%	zz)/)B)B)NF%%TZTnTn
 ))F$6$6: 	r&   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  r   c                 :   ||n| j                   j                  }||j                  d   n|
j                  d   }|!|j                  d|j	                  d            nd}|!|j                  d|j	                  d            nd}|!|j                  d|j	                  d            nd}|!|j                  d|j	                  d            nd}|
1|
j                  d|
j	                  d      |
j	                  d            nd}
|!|j                  d|j	                  d            nd}|!|j                  d|j	                  d            nd}|!|j                  d|j	                  d            nd}|1|j                  d|j	                  d      |j	                  d            nd}| j                  |||||||||	|
||d      }|j                  }| j                  |      }| j                  |      }|j                  d|      }d}|.|j                  |j                        }t               } |||      }|s5t        d |||j                  |j                  |j                  fD              S t!        |||j                  |j                  |j                        S )	a^  
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
            Indices of entity tokens in the entity vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
        entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
            Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:

            - 1 for entity tokens that are **not masked**,
            - 0 for entity tokens that are **masked**.
        entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
            Segment token indices to indicate first and second portions of the entity token inputs. Indices are
            selected in `[0, 1]`:

            - 0 corresponds to a *portion A* entity token,
            - 1 corresponds to a *portion B* entity token.
        entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
            Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        Nr   rb   r   Tr  c              3   $   K   | ]  }|| 
 y wr   r%   r  s     r'   r!  z0LukeForMultipleChoice.forward.<locals>.<genexpr>d  r"  r#  r  )r_   rs  r  r   ri   rF  rr  r\   r  rg   re   r   r$   r1   r   r2   rE   )r^   rm   r   rn   rl   r   rn  ro  rp  r   ro   r  r   r&  r'  num_choicesr   r7  r/   reshaped_logitsr,   r  s                         r'   rr   zLukeForMultipleChoice.forward  s   F &1%<k$++B]B],5,Aiooa(}GZGZ[\G]>G>SINN2y~~b'9:Y]	M[Mg,,R1D1DR1HImqM[Mg,,R1D1DR1HImqGSG_|((\->->r-BCei ( r=#5#5b#9=;M;Mb;QR 	 BLAWZ__R)<=]a
 %0 "&&r+@+E+Eb+IJ 	 %0 "&&r+@+E+Eb+IJ 	 #.  $$R)<)A)A")EGZG_G_`bGcd 	 ))))%!"7"7 3'/!5  
   --]3/ ++b+6YY556F')HOV4D 
 #))00&&
 
 
 -"!//!(!=!=))
 	
r&   r  )r   r   r   rN   r   r   r!   r   r"   r  r   r$   rE   rr   rx   ry   s   @r'   r  r    s}   
  156:593715=A<@:>1559.2,0/3&*P
E,,-P
 !!2!23P
 !!1!12	P

 u//0P
 U--.P
  ((9(9:P
  ((8(89P
 &e&6&67P
 E--.P
   1 12P
 **+P
 $D>P
 'tnP
 d^P
  
u33	4!P
 P
r&   r  )
r  r  r  r  r  r  r  r  rY  rE  )Gr    r   dataclassesr   typingr   r   r!   torch.utils.checkpointr   torch.nnr   r   r	   activationsr   r   modeling_layersr   modeling_outputsr   r   modeling_utilsr   pytorch_utilsr   utilsr   r   r   configuration_luker   
get_loggerr   loggerr   r)   r+   r4   r9   r;   r=   r?   rA   rE   rW  rG   r{   r   r   r   r   r   r  r  r1  r9  r>  rE  rY  rf   r  r  r  r  r  r  r  r  r  __all__r%   r&   r'   <module>r     s}     ! "    A A ' 9 K - 6 9 9 * 
		H	% 
I%? I I" 
I/ I I 
? ? ?8 
? ? ?& 
?[ ? ?& 
?[ ? ?& 
?; ? ?& 
? ? ?& 
?{ ? ?$ 
?K ? ?*F=RYY F=R(299 (Vn		 ndRYY ,BII ,`ryy   1* 1hA
")) A
J BII "299  */ * *0 
Y'# Y'
Y'x4"* *> L
) L
L
^ y
"5 y
y
x ~
&9 ~
~
B V
&9 V
V
r u
$7 u
u
p d
!4 d
d
N s
2 s
 s
l ^
/ ^
 ^
Br&   