
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
from ..superpoint import SuperPointConfig


class LightGlueConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LightGlueForKeypointMatching`]. It is used to
    instantiate a LightGlue model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LightGlue
    [ETH-CVG/lightglue_superpoint](https://huggingface.co/ETH-CVG/lightglue_superpoint) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        keypoint_detector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SuperPointConfig`):
            The config object or dictionary of the keypoint detector.
        descriptor_dim (`int`, *optional*, defaults to 256):
            The dimension of the descriptors.
        num_hidden_layers (`int`, *optional*, defaults to 9):
            The number of self and cross attention layers.
        num_attention_heads (`int`, *optional*, defaults to 4):
            The number of heads in the multi-head attention.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
            `num_attention_heads`.
        depth_confidence (`float`, *optional*, defaults to 0.95):
            The confidence threshold used to stop the matching layers early (adaptive depth).
        width_confidence (`float`, *optional*, defaults to 0.99):
            The confidence threshold used to prune points that are unlikely to be matched (adaptive width).
        filter_threshold (`float`, *optional*, defaults to 0.1):
            The confidence threshold used to filter matches.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function to be used in the hidden layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether to trust remote code when using a keypoint detector other than SuperPoint.

    Examples:
        ```python
        >>> from transformers import LightGlueConfig, LightGlueForKeypointMatching

        >>> # Initializing a LightGlue style configuration
        >>> configuration = LightGlueConfig()

        >>> # Initializing a model from the LightGlue style configuration
        >>> model = LightGlueForKeypointMatching(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config
        ```
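
    A custom keypoint detector configuration and attention layout can also be passed explicitly. The snippet
    below is an illustrative sketch, not a set of default values:

        ```python
        >>> from transformers import LightGlueConfig, SuperPointConfig

        >>> # Explicit SuperPoint keypoint detector config, with 4 query heads
        >>> # sharing a single key/value head (multi-query attention).
        >>> configuration = LightGlueConfig(
        ...     keypoint_detector_config=SuperPointConfig(),
        ...     num_attention_heads=4,
        ...     num_key_value_heads=1,
        ... )
        ```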
    """

    model_type = "lightglue"
    sub_configs = {"keypoint_detector_config": AutoConfig}

    def __init__(
        self,
        keypoint_detector_config: Optional[SuperPointConfig] = None,
        descriptor_dim: int = 256,
        num_hidden_layers: int = 9,
        num_attention_heads: int = 4,
        num_key_value_heads: Optional[int] = None,
        depth_confidence: float = 0.95,
        width_confidence: float = 0.99,
        filter_threshold: float = 0.1,
        initializer_range: float = 0.02,
        hidden_act: str = "gelu",
        attention_dropout: float = 0.0,
        attention_bias: bool = True,
        trust_remote_code: bool = False,
        **kwargs,
    ):
        if descriptor_dim % num_attention_heads != 0:
            raise ValueError("descriptor_dim % num_heads is different from zero")

        self.descriptor_dim = descriptor_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Default to standard multi-head attention when no key/value head count is given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.depth_confidence = depth_confidence
        self.width_confidence = width_confidence
        self.filter_threshold = filter_threshold
        self.initializer_range = initializer_range
        self.trust_remote_code = trust_remote_code

        # The keypoint detector config may arrive as a plain dict (e.g. when loading from a JSON file);
        # resolve it to a config object. Detectors unknown to CONFIG_MAPPING are loaded through AutoConfig,
        # which may require trusting remote code.
        if isinstance(keypoint_detector_config, dict):
            keypoint_detector_config["model_type"] = keypoint_detector_config.get("model_type", "superpoint")
            if keypoint_detector_config["model_type"] not in CONFIG_MAPPING:
                keypoint_detector_config = AutoConfig.from_pretrained(
                    keypoint_detector_config["_name_or_path"], trust_remote_code=self.trust_remote_code
                )
            else:
                keypoint_detector_config = CONFIG_MAPPING[keypoint_detector_config["model_type"]](
                    **keypoint_detector_config
                )
        if keypoint_detector_config is None:
            keypoint_detector_config = CONFIG_MAPPING["superpoint"]()

        self.keypoint_detector_config = keypoint_detector_config
        self.hidden_size = descriptor_dim
        self.intermediate_size = descriptor_dim * 2
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.attention_bias = attention_bias
        super().__init__(**kwargs)


__all__ = ["LightGlueConfig"]