
import itertools
from typing import Callable, Optional, Union

import torch
import torch.nn.functional as F

from .cache_utils import Cache
from .configuration_utils import PretrainedConfig
from .utils import is_torch_xpu_available
from .utils.generic import GeneralInterface
from .utils.import_utils import is_torch_flex_attn_available, is_torch_greater_or_equal, is_torchdynamo_compiling


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import _DEFAULT_SPARSE_BLOCK_SIZE as flex_default_block_size
    from torch.nn.attention.flex_attention import BlockMask, create_block_mask
else:
    # Register a fake type so that annotations and `isinstance` checks do not crash when flex attention is unavailable
    BlockMask = torch.Tensor

_is_torch_greater_or_equal_than_2_5 = is_torch_greater_or_equal("2.5", accept_dev=True)
_is_torch_greater_or_equal_than_2_6 = is_torch_greater_or_equal("2.6", accept_dev=True)
_is_torch_xpu_available = is_torch_xpu_available()

if _is_torch_greater_or_equal_than_2_5:
    from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex


def and_masks(*mask_functions: list[Callable]) -> Callable:
    """Returns a mask function that is the intersection of provided mask functions"""
    if not all(callable(arg) for arg in mask_functions):
        raise RuntimeError(f"All inputs should be callable mask_functions: {mask_functions}")

    def and_mask(batch_idx, head_idx, q_idx, kv_idx):
        result = q_idx.new_ones((), dtype=torch.bool)
        for mask in mask_functions:
            result = result & mask(batch_idx, head_idx, q_idx, kv_idx).to(result.device)
        return result

    return and_mask


def or_masks(*mask_functions: list[Callable]) -> Callable:
    """Returns a mask function that is the union of provided mask functions"""
    if not all(callable(arg) for arg in mask_functions):
        raise RuntimeError(f"All inputs should be callable mask_functions: {mask_functions}")

    def or_mask(batch_idx, head_idx, q_idx, kv_idx):
        result = q_idx.new_zeros((), dtype=torch.bool)
        for mask in mask_functions:
            result = result | mask(batch_idx, head_idx, q_idx, kv_idx).to(result.device)
        return result

    return or_mask
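

# A minimal illustrative sketch (not used by the library itself): composing two index-level mask functions with
# `and_masks`. The two inner functions and the scalar tensor indices are toy assumptions, mirroring how the
# vmapped helpers later in this file call such functions.
def _example_compose_mask_functions():
    def causal(batch_idx, head_idx, q_idx, kv_idx):
        return kv_idx <= q_idx

    def no_self(batch_idx, head_idx, q_idx, kv_idx):
        return kv_idx != q_idx

    combined = and_masks(causal, no_self)
    zero = torch.tensor(0)
    # Query position 3 may attend to key position 1 (in the past, and not itself) -> tensor(True)
    return combined(zero, zero, torch.tensor(3), torch.tensor(1))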


def causal_mask_function(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
    """
    This creates a basic lower-diagonal causal mask.
    """
    return kv_idx <= q_idx


def sliding_window_overlay(sliding_window: int) -> Callable:
    """
    This is an overlay depicting a sliding window pattern. Add it on top of a causal mask for a proper sliding
    window mask.
    """

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        return kv_idx > q_idx - sliding_window

    return inner_mask


def chunked_overlay(chunk_size: int) -> Callable:
    """
    This is an overlay depicting a chunked attention pattern. Add it on top of a causal mask for a proper chunked
    attention mask.
    """

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        return kv_idx // chunk_size == q_idx // chunk_size

    return inner_mask


def sliding_window_causal_mask_function(sliding_window: int) -> Callable:
    """
    This returns the mask function used to create a sliding window mask.
    """
    return and_masks(sliding_window_overlay(sliding_window), causal_mask_function)


def chunked_causal_mask_function(chunk_size: int) -> Callable:
    """
    This returns the mask function used to create a chunked attention mask.
    """
    return and_masks(chunked_overlay(chunk_size), causal_mask_function)


def padding_mask_function(padding_mask: torch.Tensor) -> Callable:
    """
    This returns the mask function corresponding to a 2D padding mask.
    """

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        return padding_mask[batch_idx, kv_idx]

    return inner_mask


def packed_sequence_mask_function(packed_sequence_mask: torch.Tensor) -> Callable:
    """
    This returns the mask function corresponding to a 2D packed sequence mask.
    """

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        return packed_sequence_mask[batch_idx, q_idx] == packed_sequence_mask[batch_idx, kv_idx]

    return inner_mask


def add_offsets_to_mask_function(mask_function: Callable, q_offset: int, kv_offset: int) -> Callable:
    """
    This function adds the correct offsets to the `q_idx` and `kv_idx` as the torch API can only accept lengths,
    not start and end indices.
    """

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        return mask_function(batch_idx, head_idx, q_idx + q_offset, kv_idx + kv_offset)

    return inner_mask


def _vmap_for_bhqkv(mask_function: Callable, bh_indices: bool = True) -> Callable:
    """
    Used to vmap our mask_functions over the q_idx and kv_idx dimensions of the inputs. Optionally, vmap over
    the batch and head indices as well if `bh_indices=True`.
    Using vmap here allows us to keep the performance of vectorized ops, while having a single set of primitive
    functions between attention interfaces (i.e. between flex and sdpa/eager, FA2 being a bit different).

    Args:
        mask_function (`Callable`):
            The mask_function to vmap.
        bh_indices (`bool`, optional):
            Whether to vmap over the batch and head indices as well, or only q and kv indices.

    Returns:
        Callable: The vmapped function.
    """
    # We vmap the function 2 times, broadcasting the [q_idx, kv_idx] dimensions
    dimensions = [(None, None, None, 0), (None, None, 0, None)]
    if bh_indices:
        # We extend broadcasting over the [batch_idx, head_idx] dimensions as well
        dimensions.extend([(None, 0, None, None), (0, None, None, None)])

    for dims in dimensions:
        mask_function = torch.vmap(mask_function, in_dims=dims, out_dims=0)
    return mask_function


def prepare_padding_mask(
    attention_mask: Optional[torch.Tensor], kv_length: int, kv_offset: int, _slice: bool = True
) -> Optional[torch.Tensor]:
    """
    From the 2D attention mask, prepare the correct padding mask to use by potentially padding it, and slicing
    according to the `kv_offset` if `_slice` is `True`.
    """
    local_padding_mask = attention_mask
    if attention_mask is not None:
        # Pad it if necessary
        if (padding_length := kv_length + kv_offset - attention_mask.shape[-1]) > 0:
            local_padding_mask = torch.nn.functional.pad(attention_mask, (0, padding_length))
        # For flex, we should not slice it, only use an offset
        if _slice:
            # Equivalent to `local_padding_mask = local_padding_mask[:, kv_offset : kv_offset + kv_length]`,
            # but without data-dependent slicing (i.e. torch.compile friendly)
            mask_indices = torch.arange(kv_length, device=local_padding_mask.device)
            mask_indices += kv_offset
            local_padding_mask = local_padding_mask[:, mask_indices]
    return local_padding_mask


def _ignore_causal_mask_sdpa(
    padding_mask: Optional[torch.Tensor],
    query_length: int,
    kv_length: int,
    kv_offset: int,
    local_attention_size: Optional[int] = None,
) -> bool:
    """
    Detects whether the causal mask can be ignored when PyTorch's SDPA is used, relying instead on SDPA's `is_causal` argument.

    In case no token is masked in the 2D `padding_mask` argument, if `query_length == 1` or
    `key_value_length == query_length`, we rely on the SDPA `is_causal` argument to use causal/non-causal masks,
    allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is
    passed).
    """
    is_tracing = torch.jit.is_tracing() or isinstance(padding_mask, torch.fx.Proxy) or is_torchdynamo_compiling()
    # Make sure the padding mask only covers the relevant key-value positions
    if padding_mask is not None and padding_mask.shape[-1] > kv_length:
        mask_indices = torch.arange(kv_length, device=padding_mask.device)
        mask_indices += kv_offset
        padding_mask = padding_mask[:, mask_indices]

    # We can skip materializing the mask if:
    #   - we are not tracing (tracing/export hard-codes `is_causal`, see https://github.com/huggingface/transformers/issues/30652)
    #   - the query is a single token, or spans the full key-value length (only cases where the lower and upper
    #     diagonals coincide, see https://github.com/pytorch/pytorch/issues/108108); xpu dispatches correctly in-between
    #   - no local (sliding/chunked) pattern needs to be encoded in the mask
    #   - no token is masked out by the 2D padding mask
    if (
        not is_tracing
        and (query_length == 1 or kv_length == query_length or _is_torch_xpu_available)
        and (local_attention_size is None or kv_length < local_attention_size)
        and (padding_mask is None or padding_mask.all())
    ):
        return True

    return False


def sdpa_mask_recent_torch(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: Optional[torch.Tensor] = None,
    local_size: Optional[int] = None,
    allow_is_causal_skip: bool = True,
    **kwargs,
) -> Optional[torch.Tensor]:
    """
    Create a 4D boolean mask of shape `(batch_size, 1, query_length, kv_length)` where a value of True indicates that
    the element should take part in the attention computation, and False that it should not.
    This function can only be used with torch>=2.5, as the context manager is otherwise not available.

    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
        local_size (`int`, optional):
            The size of the local attention, if we do not use full attention. This is used only if `allow_is_causal_skip=True`
            to try to skip mask creation if possible.
        allow_is_causal_skip (`bool`, optional):
            Whether to allow to return `None` for the mask under conditions where we can use the `is_causal` argument in
            `torch.sdpa` instead. Default to `True`.
        allow_torch_fix (`bool`, optional):
            Whether to update the mask in case a query is not attending to any tokens, to solve a bug in torch's older
            versions. We need an arg to skip it when using eager. By default `True`.


    ## Creating a simple causal mask:

    To create the following causal mask:

        0 ■ ⬚ ⬚ ⬚ ⬚
        1 ■ ■ ⬚ ⬚ ⬚
        2 ■ ■ ■ ⬚ ⬚
        3 ■ ■ ■ ■ ⬚
        4 ■ ■ ■ ■ ■

    You can do

    ```python
    >>> create_4d_causal_mask(batch_size=1, cache_position=torch.arange(5), kv_length=5)
    >>> tensor([[[[ True, False, False, False, False],
                  [ True,  True, False, False, False],
                  [ True,  True,  True, False, False],
                  [ True,  True,  True,  True, False],
                  [ True,  True,  True,  True,  True]]]])
    ```
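
    The same layout is used when decoding with a cache: only the new query rows are built, and `cache_position`
    carries their absolute positions. For illustration (an assumed toy call, forcing mask creation with
    `allow_is_causal_skip=False` since this case could otherwise be skipped):

    ```python
    >>> create_4d_causal_mask(batch_size=1, cache_position=torch.tensor([3, 4]), kv_length=5, allow_is_causal_skip=False)
    >>> tensor([[[[ True,  True,  True,  True, False],
                  [ True,  True,  True,  True,  True]]]])
    ```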

    ## Creating a sliding window mask:

    To create the following sliding window mask (`sliding_window=3`):

        0 ■ ⬚ ⬚ ⬚ ⬚
        1 ■ ■ ⬚ ⬚ ⬚
        2 ■ ■ ■ ⬚ ⬚
        3 ⬚ ■ ■ ■ ⬚
        4 ⬚ ⬚ ■ ■ ■

    You can do

    ```python
    >>> create_4d_causal_mask(batch_size=1, cache_position=torch.arange(5), kv_length=5, mask_function=sliding_window_causal_mask_function(3))
    >>> tensor([[[[ True, False, False, False, False],
                  [ True,  True, False, False, False],
                  [ True,  True,  True, False, False],
                  [False,  True,  True,  True, False],
                  [False, False,  True,  True,  True]]]])
    ```
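
    A 2D padding mask combines with any of these patterns. As an illustration (assumed toy input, where the last
    key/value token of the single batch entry is padding):

    ```python
    >>> create_4d_causal_mask(batch_size=1, cache_position=torch.arange(5), kv_length=5, attention_mask=torch.tensor([[True, True, True, True, False]]))
    >>> tensor([[[[ True, False, False, False, False],
                  [ True,  True, False, False, False],
                  [ True,  True,  True, False, False],
                  [ True,  True,  True,  True, False],
                  [ True,  True,  True,  True, False]]]])
    ```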

    ## Creating a chunked attention mask

    To create the following chunked attention mask (`chunk_size=3`):

        0 ■ ⬚ ⬚ ⬚ ⬚
        1 ■ ■ ⬚ ⬚ ⬚
        2 ■ ■ ■ ⬚ ⬚
        3 ⬚ ⬚ ⬚ ■ ⬚
        4 ⬚ ⬚ ⬚ ■ ■

    You can do

    ```python
    >>> create_4d_causal_mask(batch_size=1, cache_position=torch.arange(5), kv_length=5, mask_function=chunked_causal_mask_function(3))
    >>> tensor([[[[ True, False, False, False, False],
                [ True,  True, False, False, False],
                [ True,  True,  True, False, False],
                [False, False, False,  True, False],
                [False, False, False,  True,  True]]]])
    ```

    """
    q_length = cache_position.shape[0]
    # Potentially pad the 2D mask (no slicing here, the kv offset is handled through `kv_arange` below)
    padding_mask = prepare_padding_mask(attention_mask, kv_length, kv_offset, _slice=False)

    # Under specific conditions, we can avoid materializing the mask, instead relying on the `is_causal` argument
    if allow_is_causal_skip and _ignore_causal_mask_sdpa(padding_mask, q_length, kv_length, kv_offset, local_size):
        return None

    # Similar to `kv_arange = torch.arange(start=kv_offset, end=kv_offset + kv_length, device=cache_position.device)`
    # but without data-dependent slicing (i.e. torch.compile friendly)
    kv_arange = torch.arange(kv_length, device=cache_position.device)
    kv_arange += kv_offset

    # Potentially add the padding 2D mask
    if padding_mask is not None:
        mask_function = and_masks(mask_function, padding_mask_function(padding_mask))

    batch_arange = torch.arange(batch_size, device=cache_position.device)
    head_arange = torch.arange(1, device=cache_position.device)
    # This creates the 4D mask easily. Note that we need this context manager as vmap cannot handle slicing a tensor
    # from a scalar tensor (it internally calls `.item()`, which vmap does not allow) - this context works around it.
    # No offset needs to be added to the mask_function either, as we vmap directly the correct absolute q and kv indices
    with TransformGetItemToIndex():
        causal_mask = _vmap_for_bhqkv(mask_function)(batch_arange, head_arange, cache_position, kv_arange)

    return causal_mask


def sdpa_mask_older_torch(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: Optional[torch.Tensor] = None,
    local_size: Optional[int] = None,
    allow_is_causal_skip: bool = True,
    allow_torch_fix: bool = True,
    **kwargs,
) -> Optional[torch.Tensor]:
    """
    NOTE: This function is only used when torch version is torch<2.5 - see `sdpa_mask_recent_torch` otherwise.

    Create a 4D boolean mask of shape `(batch_size, 1, query_length, kv_length)` where a value of True indicates that
    the element should take part in the attention computation, and False that it should not.
    If `allow_torch_fix=True` (the default), rows corresponding to query tokens that do not attend
    to any other tokens (due to padding) will be fully attended to instead, in order to avoid `nan` propagation (this does
    not change the final result).

    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
        local_size (`int`, optional):
            The size of the local attention, if we do not use full attention. This is used only if `allow_is_causal_skip=True`
            to try to skip mask creation if possible.
        allow_is_causal_skip (`bool`, optional):
            Whether to allow to return `None` for the mask under conditions where we can use the `is_causal` argument in
            `torch.sdpa` instead. Default to `True`.
        allow_torch_fix (`bool`, optional):
            Whether to update the mask in case a query is not attending to any tokens, to solve a bug in torch's older
            versions. We need an arg to skip it when using eager. By default `True`.
    """
    q_length = cache_position.shape[0]
    # Potentially pad the 2D mask, and slice it correctly
    padding_mask = prepare_padding_mask(attention_mask, kv_length, kv_offset)

    # Under specific conditions, we can avoid materializing the mask, instead relying on the `is_causal` argument
    if allow_is_causal_skip and _ignore_causal_mask_sdpa(padding_mask, q_length, kv_length, kv_offset, local_size):
        return None

    # Similar to `kv_arange = torch.arange(start=kv_offset, end=kv_offset + kv_length, device=cache_position.device)`
    # but without data-dependent slicing (i.e. torch.compile friendly)
    kv_arange = torch.arange(kv_length, device=cache_position.device)
    kv_arange += kv_offset

    # This creates the 4D mask easily. Note that we do not vmap over the batch and head dimensions here, as older
    # vmap cannot handle slicing a tensor from a scalar tensor (it internally calls `.item()`, which vmap does not
    # allow). More recent torch has a workaround - which is the reason `sdpa_mask_recent_torch` exists and allows
    # more general `mask_function`s (e.g. ones indexing a tensor such as the padding mask function).
    causal_mask = _vmap_for_bhqkv(mask_function, bh_indices=False)(None, None, cache_position, kv_arange)
    causal_mask = causal_mask[None, None, :, :].expand(batch_size, -1, -1, -1)
    if padding_mask is not None:
        causal_mask = causal_mask * padding_mask[:, None, None, :]

    # Due to a bug in versions of torch<2.5, we need to update the mask in case a query is not attending to any
    # tokens (due to padding). See details in https://github.com/pytorch/pytorch/issues/110213
    if not _is_torch_greater_or_equal_than_2_5 and allow_torch_fix:
        causal_mask |= torch.all(~causal_mask, dim=-1, keepdim=True)
    return causal_mask


# We use the version for newer torch whenever possible, as it is more general and can handle arbitrary mask functions
# (especially mask functions indexing a tensor, such as the padding mask function)
sdpa_mask = sdpa_mask_recent_torch if _is_torch_greater_or_equal_than_2_5 else sdpa_mask_older_torch


def eager_mask(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: Optional[torch.Tensor] = None,
    dtype: torch.dtype = torch.float32,
    **kwargs,
) -> torch.Tensor:
    """
    Create a 4D float mask of shape `(batch_size, 1, query_length, kv_length)` where a value of 0 indicates that
    the element should take part in the attention computation, and -inf (minimum value for the given `dtype`) that
    it should not.

    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
        dtype (`torch.dtype`, optional):
            The dtype to use for the mask. By default, `torch.float32`.
    """
    # The mask for eager attention is simply the boolean mask from sdpa, cast to 0 and -inf
    _ = kwargs.pop("allow_is_causal_skip", None)
    _ = kwargs.pop("allow_torch_fix", None)
    mask = sdpa_mask(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=False,
        allow_torch_fix=False,
        **kwargs,
    )
    min_dtype = torch.finfo(dtype).min
    # We need 0s where the tokens should be taken into account, and -inf otherwise (the mask is already boolean)
    mask = torch.where(mask, torch.tensor(0.0, device=mask.device, dtype=dtype), min_dtype)
    return mask


def flash_attention_mask(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: Optional[torch.Tensor] = None,
    **kwargs,
):
    """
    Create the attention mask necessary to use FA2. Since FA2 is un-padded by definition, here we simply return
    `None` if the mask is fully causal, or we return the 2D mask which will then be used to extract the seq_lens.
    We just slice it in case of sliding window.

    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
    """
    if attention_mask is not None:
        # Slice from the right along the key-value dimension
        attention_mask = attention_mask[:, -kv_length:]
        # We only return an actual mask if there is at least 1 padding token, otherwise we return `None` and rely
        # on `is_causal` in FA2 (the mask is boolean at this point)
        if attention_mask.all():
            attention_mask = None

    return attention_mask


def flex_attention_mask(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: Optional[torch.Tensor] = None,
    **kwargs,
) -> "BlockMask":
    """
    Create a 4D block mask which is a compressed representation of the full 4D block causal mask. BlockMask is essential
    for performant computation of flex attention. See: https://pytorch.org/blog/flexattention/

    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
    """
    q_length, q_offset = cache_position.shape[0], cache_position[0]

    # Potentially add the padding 2D mask
    if attention_mask is not None:
        # Older torch (2.5.x) cannot handle sequences that are not a multiple of the sparse block size, so pad up to it
        pad_len = (attention_mask.shape[1] // flex_default_block_size + 1) * flex_default_block_size
        pad_len -= attention_mask.shape[1]
        if not _is_torch_greater_or_equal_than_2_6 and pad_len > 0:
            attention_mask = torch.nn.functional.pad(attention_mask, value=0, pad=(0, pad_len))

        padding_mask = prepare_padding_mask(attention_mask, kv_length, kv_offset, _slice=False)
        mask_function = and_masks(mask_function, padding_mask_function(padding_mask))

    # Add the offsets on top (because the flex interface only allows lengths, not start and end indices)
    mask_function = add_offsets_to_mask_function(mask_function, q_offset, kv_offset)

    block_mask = create_block_mask(
        mask_mod=mask_function,
        B=batch_size,
        H=None,  # attention head
        Q_LEN=q_length,
        KV_LEN=kv_length,
        device=cache_position.device,
        _compile=_is_torch_greater_or_equal_than_2_6,
    )
    return block_mask


class AttentionMaskInterface(GeneralInterface):
    # Class instance object, so that a call to `register` can be reflected into all other files correctly, even if
    # a new instance is created (in order to locally override a given mask function)
    _global_mapping = {
        "sdpa": sdpa_mask,
        "eager": eager_mask,
        "flash_attention_2": flash_attention_mask,
        "flex_attention": flex_attention_mask,
    }


# Global AttentionMaskInterface shared by all models which do not need to overwrite any of the existing ones
ALL_MASK_ATTENTION_FUNCTIONS: AttentionMaskInterface = AttentionMaskInterface()


def find_packed_sequence_indices(position_ids: torch.Tensor) -> torch.Tensor:
    """
    Find the indices of the sequence to which each new query token in the sequence belongs when using packed
    tensor format (i.e. several sequences packed in the same batch dimension).

    Args:
        position_ids (`torch.Tensor`)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.

    Returns:
        A 2D tensor where each similar integer indicates that the tokens belong to the same sequence. For example, if we
        pack 3 sequences of 2, 3 and 1 tokens respectively along a single batch dim, this will return [[0, 0, 1, 1, 1, 2]].
    """
    # What separates sequences is the point where the position ids restart, i.e. where the difference with the
    # previous position is not 1 -> use this to find the sequence boundaries
    first_dummy_value = position_ids[:, :1] - 1  # We just need the diff on this first value to be 1
    position_diff = torch.diff(position_ids, prepend=first_dummy_value, dim=-1)
    packed_sequence_mask = (position_diff != 1).cumsum(-1)
    return packed_sequence_mask


def _preprocess_mask_arguments(
    config: PretrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[Union[torch.Tensor, BlockMask]],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor],
    layer_idx: Optional[int],
) -> tuple[bool, Optional[Union[torch.Tensor, BlockMask]], Optional[torch.Tensor], int, int]:
    """
    Perform some common pre-processing of the mask arguments we get from the modeling code. Mostly determine the
    key-value length and offsets, and if we should early exit or not.

    Args:
        config (`PretrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        layer_idx (`int`, optional):
            If `past_key_values` is not None, this is the layer index of the cache from which to get the key-value
            length and offset. Indeed, for hybrid caches, different layers may return different lengths.

    Returns:
        early_exit (`bool`):
            Whether we should early exit mask creation, and return the mask as-is.
        attention_mask (`torch.Tensor` or `BlockMask` or `None`):
            The attention mask to either return immediately, or to use in downstream mask creation.
        packed_sequence_mask (`torch.Tensor`, optional):
            In case we detected packed sequence format, this is a tensor where each similar integer indicates that
            the tokens belong to the same sequence.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`):
            An offset to indicate at which first position the key and values states will refer to.
    """
    # If the mask is already 4D (or a BlockMask), it was already prepared (or is custom): return it as-is
    if isinstance(attention_mask, (torch.Tensor, BlockMask)) and len(attention_mask.shape) == 4:
        return True, attention_mask, None, None, None

    # For TGI/vLLM backends, or other custom attention without an equivalent mask creation: we don't need a mask!
    if config._attn_implementation not in ALL_MASK_ATTENTION_FUNCTIONS:
        return True, None, None, None, None

    # Move the mask to the correct device, and potentially switch dtype for efficiency
    if attention_mask is not None and attention_mask.ndim == 2:
        attention_mask = attention_mask.to(device=cache_position.device, dtype=torch.bool)

    # If using a cache, it can give all the information about mask sizes based on seen tokens
    if past_key_values is not None:
        kv_length, kv_offset = past_key_values.get_mask_sizes(cache_position, layer_idx)
    # Otherwise, the sizes are simply the input sizes
    else:
        kv_length, kv_offset = cache_position.shape[0], 0

    # Check the position_ids for a potential packed sequence format (only if the 2D attention mask is explicitly
    # None and there is no cache, i.e. generally a training setup)
    packed_sequence_mask = None
    if position_ids is not None and attention_mask is None and past_key_values is None:
        batch_size = input_embeds.shape[0]
        # The position ids are sometimes just unsqueezed, without being expanded
        if batch_size != position_ids.shape[0]:
            position_ids = position_ids.expand(batch_size, -1)
        packed_sequence_mask = find_packed_sequence_indices(position_ids)

    return False, attention_mask, packed_sequence_mask, kv_length, kv_offset


def create_causal_mask(
    config: PretrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
) -> Optional[Union[torch.Tensor, BlockMask]]:
    """
    Create a standard causal mask based on the attention implementation used (stored in the config). If `past_key_values`
    has an HybridCache structure, this function will return the mask corresponding to one of the "full_attention" layers (to align
    to what is needed in the `modeling_xxx.py` files).

    Args:
        config (`PretrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function to combine with the causal mask function (by doing the union of both). This is
            useful to easily overlay another mask on top of the causal one, for example for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function to combine with the causal mask function (by doing the intersection of both). This is
            useful to easily overlay another mask on top of the causal one, for example for image tokens handling.
    """
    # If we have an HybridCache structure, we want the mask corresponding to one of the full attention layers
    if hasattr(past_key_values, "is_sliding") and False in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(False)
    else:
        layer_idx = 0

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, input_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    if early_exit:
        return attention_mask

    batch_size, dtype = input_embeds.shape[0], input_embeds.dtype
    mask_factory_function = causal_mask_function
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]

    # Only allow skipping the mask if the cache is not compileable (compiled/static caches need a fixed mask shape)
    allow_is_causal_skip = not past_key_values.is_compileable if past_key_values is not None else True

    # If we detected packing format
    if packed_sequence_mask is not None and _is_torch_greater_or_equal_than_2_6:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # Allow slight deviations from the causal mask
    if or_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = or_masks(mask_factory_function, or_mask_function)
        allow_is_causal_skip = False
    if and_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = and_masks(mask_factory_function, and_mask_function)
        allow_is_causal_skip = False

    # We now create the mask
    causal_mask = mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        dtype=dtype,  # additional kwarg for eager
        config=config,  # pass the config as well, in case someone wants to easily have their own mask_interface
    )
    return causal_mask


def create_sliding_window_causal_mask(
    config: PretrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
) -> Optional[Union[torch.Tensor, BlockMask]]:
    """
    Create a sliding window causal mask based on the attention implementation used (stored in the config). This type
    of attention pattern was mostly democratized by Mistral. If `past_key_values` has an HybridCache structure, this
    function will return the mask corresponding to one of the "sliding_attention" layers (to align to what is needed in the
    `modeling_xxx.py` files).

    Args:
        config (`PretrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function to combine with the sliding causal mask function (by doing the union of both). This is
            useful to easily overlay another mask on top of the sliding causal one, for example for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function to combine with the sliding causal mask function (by doing the intersection of both). This is
            useful to easily overlay another mask on top of the sliding causal one, for example for image tokens handling.
    """
    # If we have an HybridCache structure, we want the mask corresponding to one of the sliding attention layers
    if hasattr(past_key_values, "is_sliding") and True in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(True)
    else:
        layer_idx = 0

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, input_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    if early_exit:
        return attention_mask

    sliding_window = getattr(config, "sliding_window", None)
    if sliding_window is None:
        raise ValueError("Could not find a `sliding_window` argument in the config, or it is not set")

    batch_size, dtype = input_embeds.shape[0], input_embeds.dtype
    mask_factory_function = sliding_window_causal_mask_function(sliding_window)
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]

    allow_is_causal_skip = not past_key_values.is_compileable if past_key_values is not None else True

    # If we detected packing format
    if packed_sequence_mask is not None and _is_torch_greater_or_equal_than_2_6:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # Allow slight deviations from the sliding causal mask
    if or_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = or_masks(mask_factory_function, or_mask_function)
        allow_is_causal_skip = False
    if and_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = and_masks(mask_factory_function, and_mask_function)
        allow_is_causal_skip = False

    # We now create the mask
    causal_mask = mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        local_size=sliding_window,  # additional kwarg for sdpa
        dtype=dtype,  # additional kwarg for eager
        config=config,  # pass the config as well, in case someone wants to easily have their own mask_interface
    )
    return causal_mask


def create_chunked_causal_mask(
    config: PretrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
) -> Optional[Union[torch.Tensor, BlockMask]]:
    """
    Create a chunked attention causal mask based on the attention implementation used (stored in the config). This type
    of attention pattern was mostly democratized by Llama4. If `past_key_values` has an HybridCache structure, this
    function will return the mask corresponding to one of the "chunked_attention" layers (to align to what is needed in the
    `modeling_xxx.py` files).

    Args:
        config (`PretrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function to combine with the chunked causal mask function (by doing the union of both). This is
            useful to easily overlay another mask on top of the chunked causal one, for example for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function to combine with the chunked causal mask function (by doing the intersection of both). This is
            useful to easily overlay another mask on top of the chunked causal one, for example for image tokens handling.
    """
    # If we have an HybridCache structure, we want the mask corresponding to one of the chunked attention layers
    if hasattr(past_key_values, "is_sliding") and True in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(True)
    else:
        layer_idx = 0

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, input_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    if early_exit:
        return attention_mask

    chunk_size = getattr(config, "attention_chunk_size", None)
    if chunk_size is None:
        raise ValueError("Could not find an `attention_chunk_size` argument in the config, or it is not set")

    # Raise if using chunked attention on a context too large for FA2
    if config._attn_implementation == "flash_attention_2" and kv_offset + kv_length > chunk_size:
        raise ValueError(
            "Flash attention 2 cannot handle chunked attention, and the key-value length is larger than the chunk size so the "
            "chunked pattern cannot be respected. You should use another `attn_implementation` when instantiating the model"
        )

    batch_size, dtype = input_embeds.shape[0], input_embeds.dtype
    mask_factory_function = chunked_causal_mask_function(chunk_size)
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]

    allow_is_causal_skip = not past_key_values.is_compileable if past_key_values is not None else True

    # If we detected packing format
    if packed_sequence_mask is not None and _is_torch_greater_or_equal_than_2_6:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # Allow slight deviations from the chunked causal mask
    if or_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = or_masks(mask_factory_function, or_mask_function)
        allow_is_causal_skip = False
    if and_mask_function is not None:
        if not _is_torch_greater_or_equal_than_2_6:
            raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
        mask_factory_function = and_masks(mask_factory_function, and_mask_function)
        allow_is_causal_skip = False

    # We now create the mask
    causal_mask = mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        local_size=chunk_size,  # additional kwarg for sdpa
        dtype=dtype,  # additional kwarg for eager
        config=config,  # pass the config as well, in case someone wants to easily have their own mask_interface
    )
    return causal_mask


LAYER_PATTERN_TO_MASK_FUNCTION_MAPPING = {
    "full_attention": create_causal_mask,
    "sliding_attention": create_sliding_window_causal_mask,
    "chunked_attention": create_chunked_causal_mask,
}


def create_masks_for_generate(
    config: PretrainedConfig,
    input_embeds: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    cache_position: torch.Tensor,
    past_key_values: Optional[Cache],
    position_ids: Optional[torch.Tensor] = None,
    or_mask_function: Optional[Callable] = None,
    and_mask_function: Optional[Callable] = None,
):
    """
    This function mimics how we create the masks in the `modeling_xxx.py` files, and is used in `generate` in order
    to easily create the masks in advance, when we compile the forwards with Static caches.

    Args:
        config (`PretrainedConfig`):
            The model config.
        input_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function to combine with the other mask function (by doing the union of both). This is
            useful to easily overlay another mask on top of the causal one, for example for image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function to combine with the other mask function (by doing the intersection of both). This is
            useful to easily overlay another mask on top of the causal one, for example for image tokens handling.
    """
    # Composite (e.g. multimodal) configs keep the relevant attributes on their text sub-config
    effective_config = config.get_text_config()
    mask_kwargs = {
        "config": effective_config,
        "input_embeds": input_embeds,
        "attention_mask": attention_mask,
        "cache_position": cache_position,
        "past_key_values": past_key_values,
        "position_ids": position_ids,
        "or_mask_function": or_mask_function,
        "and_mask_function": and_mask_function,
    }
    # If the config declares layer types, create one mask per type of layer (full, sliding, chunked)
    if hasattr(effective_config, "layer_types"):
        causal_masks = {}
        for layer_pattern in set(effective_config.layer_types):
            causal_masks[layer_pattern] = LAYER_PATTERN_TO_MASK_FUNCTION_MAPPING[layer_pattern](**mask_kwargs)
        return causal_masks
    # In this case, all layers are sliding
    elif getattr(effective_config, "sliding_window", None) is not None:
        return create_sliding_window_causal_mask(**mask_kwargs)
    # In this case, all layers are chunked
    elif getattr(effective_config, "attention_chunk_size", None) is not None:
        return create_chunked_causal_mask(**mask_kwargs)
    # Otherwise, all layers use standard causal attention
    return create_causal_mask(**mask_kwargs)


# Below are small utilities to pretty-print the different mask patterns
GREEN = "\033[92m"
YELLOW = "\033[93m"
RESET = "\033[0m"

BLACK_SQUARE = "■"
WHITE_SQUARE = "⬚"
GREY_SQUARE = "∙"
LOW_TRIANGLE = "⬕"
UPPER_TRIANGLE = "⬔"


def get_style(style=None):
    if style == "majong":
        BLACK_SQUARE = "🀞"
        WHITE_SQUARE = "🀙"
        LOW_TRIANGLE = "🀛"
        UPPER_TRIANGLE = "🀆"
    else:
        BLACK_SQUARE = "█"
        WHITE_SQUARE = "░"
        LOW_TRIANGLE = "▙"
        UPPER_TRIANGLE = "▜"
    return BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE


YELLOW_SQUARE = f"{YELLOW}{BLACK_SQUARE}{RESET}"
GREEN_SQUARE = f"{GREEN}{BLACK_SQUARE}{RESET}"


def tensor_to_mask_visual(original_tensor: torch.Tensor, grid_size=(20, 40), style=None) -> str:
    BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE = get_style(style)
    h, w = original_tensor.shape
    max_h, max_w = grid_size

    if not (h <= max_h and w <= max_w):
        # Preserve the aspect ratio when downsampling (the factor of 2 accounts for characters being taller than wide)
        aspect_ratio = 2 * w / h
        if aspect_ratio > 1:
            w = max_w
            h = max(1, min(max_h, round(max_w / aspect_ratio)))
        else:
            h = max_h
            w = max(1, min(max_w, round(max_h * aspect_ratio)))
        # Use average pooling to downsample the mask onto the display grid
        tensor = original_tensor.unsqueeze(0).unsqueeze(0).float()
        tensor = F.adaptive_avg_pool2d(tensor, output_size=(h, w))[0, 0]
    else:
        tensor = original_tensor.float()

    # 1 -> fully attended block, 0 -> fully masked block, in-between -> partially attended block (boundary)
    result = []
    for i in range(h):
        row = ""
        for j in range(w):
            value = tensor[i, j].item()
            if value == 1:
                row += BLACK_SQUARE
            elif value == 0:
                row += WHITE_SQUARE
            else:
                # Partially attended: use a triangle oriented towards the fully attended neighbor
                row += LOW_TRIANGLE if j > 0 and tensor[i, j - 1].item() == 1 else UPPER_TRIANGLE
        result.append(row)
    return "\n".join(result)


class AttentionMask(torch.Tensor):
    def __new__(cls, data, style=None):
        out = torch.Tensor._make_subclass(cls, data, require_grad=False)
        out.style = style
        return out

    def __init__(self, data, style=None):
        pass

    def to_string(self, grid_size=(20, 40), limit=4):
        """Returns a string representation of the block mask."""
        dense_mask = self
        *batch_dims, num_rows, num_cols = dense_mask.shape
        total_vis = []
        for idx, batch_idx in enumerate(itertools.product(*[range(i) for i in batch_dims])):
            if idx == limit:
                total_vis.append("...")
                total_vis.append("To print out more, set AttentionMask.to_string(limit=N)")
                total_vis.append("You can also index (AttentionMask[batch, head]) to choose a specific batch or head")
                break
            block_vis = tensor_to_mask_visual(dense_mask[batch_idx], grid_size=grid_size, style=self.style)
            total_vis.append(block_vis)

        total_vis.append(f"torch.Tensor(shape={tuple(self.shape)}, dtype={self.dtype})")
        return "\n".join(total_vis)

    def __repr__(self):
        return self.to_string()

    def __str__(self):
        return self.to_string()

    @classmethod
    def from_tensor(cls, tensor: torch.Tensor, style: Optional[str] = None) -> "AttentionMask":
        res = cls(tensor)
        res.style = style
        return res