
from collections.abc import Sequence
from typing import Any, Optional, Union

from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...modeling_rope_utils import rope_config_validation
from ...utils import is_timm_available, logging, requires_backends


if is_timm_available():
    from timm.data import ImageNetInfo, infer_imagenet_subset


logger = logging.get_logger(__name__)


class Gemma3nTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nTextModel`]. It is used to instantiate a
    Gemma3nTextModel model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g.
    [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 262400):
            Vocabulary size of the Gemma3nText model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`Gemma3nTextModel`].
        vocab_size_per_layer_input (`int`, *optional*, defaults to 262144):
            Vocabulary size of the per-layer text embeddings that augment the standard embeddings.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        hidden_size_per_layer_input (`int`, *optional*, defaults to 256):
            Dimension of the hidden representations for per-layer embeddings.
        intermediate_size (`int` or `Sequence[int]`, *optional*, defaults to 16384):
            Dimension of the MLP representations. MatFormer configurations may provide a sequence of integers to
            account for variable `intermediate_size` values across layers. In such cases,
            `len(intermediate_size) == num_hidden_layers` (see the additional example below).
        num_hidden_layers (`int`, *optional*, defaults to 35):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If not specified, will default to `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to
            `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"`
            activation function.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings used in global attention.
            NOTE: if you apply a new rope type and expect the model to work on a longer `max_position_embeddings`, we
            recommend updating this value accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to the value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        rope_local_base_freq (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings for local attention.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*, defaults to 512):
            This is the size of the sliding window used by local attention layers.
        layer_types (`Sequence[str]`, *optional*):
            A sequence of strings defining the attention type for each layer as either "sliding_attention" or
            "full_attention". If not provided, `layer_types` will be inferred from `num_hidden_layers` using a pattern
            of four "sliding_attention" layers followed by one "full_attention". The last layer in the model should
            always be a "full_attention" layer.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the logits.
        altup_active_idx (`int`, *optional*, defaults to 0):
            The index of the prediction from which AltUp computes additional predictions and to which the correction
            is applied.
        altup_coef_clip (`float`, *optional*, defaults to 120.0):
            The maximum amplitude of an AltUp prediction or correction coefficient weight.
        altup_correct_scale (`bool`, *optional*, defaults to `True`):
            If True, apply the `AltUp.correct_output_scale` to the corrected prediction at `altup_active_idx`.
        altup_num_inputs (`int`, *optional*, defaults to 4):
            The number of predictions that AltUp should make given the input sequence.
        num_kv_shared_layers (`int`, *optional*, defaults to 15):
            The number of layers that share KV cache values. During the forward pass, the last `num_kv_shared_layers`
            layers in the model "share" the KV values in that each local and global layer in this range uses the KV
            cache values computed for the last local or global layer, respectively, before entering this range. The
            value of `num_kv_shared_layers` should be a multiple of `sliding_window_pattern`.
        laurel_rank (`int`, *optional*, defaults to 64):
            The intermediate size for the linear projections in the Learned Augmented Residual Layer.
        activation_sparsity_pattern (Sequence[float], *optional*, defaults to `(0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)`):
            The sparsity factor used to extract the top-k activations for a given layer. The provided Sequence must
            explicitly provide a sparsity value for each layer in the model.

    ```python
    >>> from transformers import Gemma3nTextModel, Gemma3nTextConfig

    >>> # Initializing a Gemma3nText gemma3n_text-E4B style configuration
    >>> configuration = Gemma3nTextConfig()

    >>> # Initializing a model from the gemma3n_text-E4B style configuration
    >>> model = Gemma3nTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
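
    As a further illustration, `intermediate_size`, `layer_types`, and `activation_sparsity_pattern` can all be given
    per layer. The values below are purely illustrative (they do not correspond to a released checkpoint); note that
    `activation_sparsity_pattern` must then also be resized to match `num_hidden_layers`:

    ```python
    >>> from transformers import Gemma3nTextConfig

    >>> configuration = Gemma3nTextConfig(
    ...     num_hidden_layers=10,
    ...     intermediate_size=[16384] * 5 + [8192] * 5,  # one MLP width per layer (MatFormer-style)
    ...     layer_types=(["sliding_attention"] * 4 + ["full_attention"]) * 2,  # last layer stays "full_attention"
    ...     activation_sparsity_pattern=[0.95] * 10,  # must provide one value per layer
    ... )
    >>> configuration.layer_types[-1]
    'full_attention'
    ```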
    """

    model_type = "gemma3n_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int = 262_400,
        vocab_size_per_layer_input: int = 262_144,
        hidden_size: int = 2048,
        hidden_size_per_layer_input: int = 256,
        intermediate_size: Union[int, Sequence[int]] = 16_384,
        num_hidden_layers: int = 35,
        num_attention_heads: int = 8,
        num_key_value_heads: int = 2,
        head_dim: int = 256,
        hidden_activation: str = "gelu_pytorch_tanh",
        max_position_embeddings: int = 32_768,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = 0,
        eos_token_id: int = 1,
        bos_token_id: int = 2,
        rope_theta: float = 1_000_000.0,
        rope_scaling: Optional[dict[str, Any]] = None,
        rope_local_base_freq: float = 10_000.0,
        attention_bias: bool = False,
        attention_dropout: float = 0.0,
        sliding_window: int = 512,
        layer_types: Optional[Sequence[str]] = None,
        final_logit_softcapping: float = 30.0,
        altup_active_idx: int = 0,
        altup_coef_clip: float = 120.0,
        altup_correct_scale: bool = True,
        altup_num_inputs: int = 4,
        num_kv_shared_layers: int = 15,
        laurel_rank: int = 64,
        activation_sparsity_pattern: Optional[Union[float, Sequence[float]]] = (0.95,) * 10 + (0.0,) * 25,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        if isinstance(intermediate_size, Sequence) and (intsize_len := len(intermediate_size)) != num_hidden_layers:
            raise ValueError(
                "intermediate_size must have an explicit intermediate size for every layer or one for all layers. "
                f"Expected {num_hidden_layers} values but got {intsize_len}."
            )
        elif not isinstance(intermediate_size, Sequence):
            intermediate_size = [intermediate_size] * num_hidden_layers

        self.vocab_size = vocab_size
        self.vocab_size_per_layer_input = vocab_size_per_layer_input
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.rope_local_base_freq = rope_local_base_freq
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.rope_scaling = rope_scaling
        rope_config_validation(self)

        if layer_types is None:
            # Default pattern: every fifth layer uses full attention, the rest use sliding-window attention,
            # so the last layer is always a "full_attention" layer.
            self.layer_types = [
                "full_attention" if (i + 1) % 5 == 0 else "sliding_attention" for i in range(self.num_hidden_layers)
            ]
        else:
            self.layer_types = layer_types
        layer_type_validation(self.layer_types)

        self.hidden_size_per_layer_input = hidden_size_per_layer_input
        self.num_kv_shared_layers = num_kv_shared_layers

        self.altup_active_idx = altup_active_idx
        self.altup_coef_clip = altup_coef_clip
        self.altup_correct_scale = altup_correct_scale
        self.altup_num_inputs = altup_num_inputs

        self.laurel_rank = laurel_rank

        if activation_sparsity_pattern is None:
            activation_sparsity_pattern = [0.0] * num_hidden_layers

        if (len_asp := len(activation_sparsity_pattern)) != num_hidden_layers:
            raise ValueError(
                "activation_sparsity_pattern must have an explicit activation sparsity value for every layer. "
                f"Expected {num_hidden_layers} values but got {len_asp}."
            )
        self.activation_sparsity_pattern = activation_sparsity_pattern


class Gemma3nAudioConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nAudioEncoder`]. It is used to instantiate
    a `Gemma3nAudioEncoder` model according to the specified arguments, defining the model architecture. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g.,
    [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 128):
            Vocabulary size of the additional hard-token embeddings for audio model. These augment the embeddings
            included in the `Gemma3nTextModel` to provide, e.g., the end of audio and audio soft token placeholder
            tokens when converting `input_ids` to embeddings in the `Gemma3nForConditionalGeneration` model.
        vocab_offset (`int`, *optional*, defaults to 262272):
            Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the
            0-indexed `Gemma3nMultimodalEmbedder.embedding` table.
        input_feat_size (`int`, *optional*, defaults to 128):
            The number of channels in each mel-spectrogram frame.
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimension of the hidden representations.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        gradient_clipping (`float`, *optional*, defaults to 10000000000.0):
            Clipping value used to stabilize extremely large gradient values.
        conf_attention_chunk_size (`int`, *optional*, defaults to 12):
            The sub-sequence size for local attention processing inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_attention_context_left (`int`, *optional*, defaults to 13):
            The left context size of the local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_attention_context_right (`int`, *optional*, defaults to 0):
            The right context size of the local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_attention_logit_cap (`float`, *optional*, defaults to 50.0):
            Logit cap applied during local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_num_attention_heads (`int`, *optional*, defaults to 8):
            The number of attention heads in local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_num_hidden_layers (`int`, *optional*, defaults to 12):
            The number of layers that use local attention inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_conv_kernel_size (`int`, *optional*, defaults to 5):
            Convolution kernel size for the conformer block inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_reduction_factor (`int`, *optional*, defaults to 4):
            Reduction factor used in the conformer block inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_residual_weight (`float`, *optional*, defaults to 0.5):
            Residual connection weight inside the Conformer ("conf") section of the
            Universal Speech Model.
        sscp_conv_channel_size (`tuple(int, int)`, *optional*, defaults to `(128, 32)`):
            The channel sizes for the first and second convolutional layers in the Sub-sample Convolution Projection
            ("sscp") section of the Universal Speech Model.
        sscp_conv_group_norm_eps (`float`, *optional*, defaults to 0.001):
            Epsilon used in group normalization in the subsample convolution projection in the Sub-sample Convolution
            Projection ("sscp") section of the Universal Speech Model.
        sscp_conv_kernel_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((3, 3), (3, 3))`):
            Kernel sizes of the two convolutional layers in the subsample convolution projection  in the Sub-sample
            Convolution Projection ("sscp") section of the Universal Speech Model. The kernel sizes are specified as a
            tuple of height and width for each layer, where the height corresponds to the time dimension and the width
            corresponds to the frequency dimension.
        sscp_conv_stride_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((2, 2), (2, 2))`):
            Stride sizes of the two convolutional layers in the subsample convolution projection in the Sub-sample
            Convolution Projection ("sscp") section of the Universal Speech Model. The stride sizes are specified as a
            tuple of height and width for each layer, where the height corresponds to the time dimension and the width
            corresponds to the frequency dimension; a short sketch of the resulting subsampling factor follows the
            example below.

    Example:

    ```python
    >>> from transformers import Gemma3nAudioConfig, Gemma3nAudioEncoder

    >>> # Initializing a Gemma3nAudioEncoder gemma3n_audio-E4B-style configuration
    >>> configuration = Gemma3nAudioConfig()

    >>> # Initializing a model from the gemma3n_audio-E4B style configuration
    >>> model = Gemma3nAudioEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
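
    The two "sscp" convolutions each use stride 2 in time and frequency, so with the defaults the mel-spectrogram is
    subsampled by a factor of 4 along both axes before the conformer stack. A small sketch of that arithmetic
    (illustrative only; exact output lengths also depend on padding):

    ```python
    >>> from transformers import Gemma3nAudioConfig

    >>> config = Gemma3nAudioConfig()
    >>> time_stride = config.sscp_conv_stride_size[0][0] * config.sscp_conv_stride_size[1][0]
    >>> freq_stride = config.sscp_conv_stride_size[0][1] * config.sscp_conv_stride_size[1][1]
    >>> (time_stride, freq_stride)
    (4, 4)
    >>> # e.g. 100 spectrogram frames of 128 mel bins reduce to roughly 25 x 32 positions
    >>> (100 // time_stride, config.input_feat_size // freq_stride)
    (25, 32)
    ```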
    """

    model_type = "gemma3n_audio"

    def __init__(
        self,
        vocab_size: int = 128,
        vocab_offset: int = 262_272,
        input_feat_size: int = 128,
        hidden_size: int = 1536,
        rms_norm_eps: float = 1e-6,
        gradient_clipping: float = 10_000_000_000.0,
        conf_attention_chunk_size: int = 12,
        conf_attention_context_left: int = 13,
        conf_attention_context_right: int = 0,
        conf_attention_logit_cap: float = 50.0,
        conf_num_attention_heads: int = 8,
        conf_num_hidden_layers: int = 12,
        conf_conv_kernel_size: int = 5,
        conf_reduction_factor: int = 4,
        conf_residual_weight: float = 0.5,
        sscp_conv_channel_size: tuple[int, int] = (128, 32),
        sscp_conv_group_norm_eps: float = 1e-3,
        sscp_conv_kernel_size: tuple[tuple[int, int], tuple[int, int]] = ((3, 3), (3, 3)),
        sscp_conv_stride_size: tuple[tuple[int, int], tuple[int, int]] = ((2, 2), (2, 2)),
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.input_feat_size = input_feat_size
        self.hidden_size = hidden_size
        self.rms_norm_eps = rms_norm_eps
        self.vocab_size = vocab_size
        self.vocab_offset = vocab_offset
        self.gradient_clipping = gradient_clipping
        self.conf_attention_chunk_size = conf_attention_chunk_size
        self.conf_attention_context_left = conf_attention_context_left
        self.conf_attention_context_right = conf_attention_context_right
        self.conf_attention_logit_cap = conf_attention_logit_cap
        self.conf_num_attention_heads = conf_num_attention_heads
        self.conf_num_hidden_layers = conf_num_hidden_layers
        self.conf_conv_kernel_size = conf_conv_kernel_size
        self.conf_reduction_factor = conf_reduction_factor
        self.conf_residual_weight = conf_residual_weight
        self.sscp_conv_channel_size = sscp_conv_channel_size
        self.sscp_conv_group_norm_eps = sscp_conv_group_norm_eps
        self.sscp_conv_kernel_size = sscp_conv_kernel_size
        self.sscp_conv_stride_size = sscp_conv_stride_size


class Gemma3nVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`]. It is used to
    instantiate a timm model according to the specified arguments, defining the model architecture. Instantiating a
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B
    vision tower, e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    The config loads ImageNet label descriptions and stores them in the `id2label` attribute; the `label2id` attribute
    for default ImageNet models is set to `None` because the label descriptions are not guaranteed to be unique (see
    the sketch after the example below).

    Args:
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        do_pooling (`bool`, *optional*, defaults to `False`):
            Whether to do pooling for the last_hidden_state in `TimmWrapper` or not.
        architecture (`str`, *optional*, defaults to `"mobilenetv5_300m_enc"`):
            Determines vision architecture for TimmWrapper.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        vocab_size (`int`, *optional*, defaults to 128):
            Vocabulary size of the additional hard-token embeddings for vision model.
        vocab_offset (`int`, *optional*, defaults to 262144):
            Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the
            0-indexed `Gemma3nMultimodalEmbedder.embedding` table.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.

    Example:
    ```python
    >>> from transformers import Gemma3nVisionConfig, TimmWrapper

    >>> # Initializing a TimmWrapper gemma3n_vision-E4B-style configuration
    >>> configuration = Gemma3nVisionConfig()

    >>> # Initializing a gemma3n_vision-E4B-style TimmWrapper from the configuration
    >>> model = TimmWrapper(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
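
    Because this config round-trips timm metadata, `from_dict`/`to_dict` translate between timm's
    `num_classes`/`label_names` and the transformers-native `num_labels`/`id2label` attributes. A minimal sketch with
    hypothetical label names (illustrative only):

    ```python
    >>> from transformers import Gemma3nVisionConfig

    >>> config = Gemma3nVisionConfig.from_dict({"label_names": ["cat", "dog"], "num_classes": 2})
    >>> config.num_labels
    2
    >>> config.id2label
    {0: 'cat', 1: 'dog'}
    >>> config.label2id  # populated here because the label names are unique
    {'cat': 0, 'dog': 1}
    ```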
    """

    model_type = "gemma3n_vision"

    def __init__(
        self,
        initializer_range: float = 0.02,
        do_pooling: bool = False,
        architecture: str = "mobilenetv5_300m_enc",
        hidden_size: int = 2048,
        vocab_size: int = 128,
        vocab_offset: int = 262_144,
        rms_norm_eps: float = 1e-6,
        model_args: Optional[dict] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.initializer_range = initializer_range
        self.do_pooling = do_pooling
        self.architecture = architecture
        self.model_args = model_args
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.vocab_offset = vocab_offset
        self.rms_norm_eps = rms_norm_eps

    @classmethod
    def from_dict(cls, config_dict: dict[str, Any], **kwargs):
        label_names = config_dict.get("label_names", None)
        is_custom_model = "num_labels" in kwargs or "id2label" in kwargs

        # If no labels were added to the config, use the imagenet labeller from timm.
        if label_names is None and not is_custom_model:
            requires_backends(cls, ["timm"])
            imagenet_subset = infer_imagenet_subset(config_dict)
            if imagenet_subset:
                dataset_info = ImageNetInfo(imagenet_subset)
                synsets = dataset_info.label_names()
                label_descriptions = dataset_info.label_descriptions(as_dict=True)
                label_names = [label_descriptions[synset] for synset in synsets]

        if label_names is not None and not is_custom_model:
            kwargs["id2label"] = dict(enumerate(label_names))

            # If all label names are unique, create the label2id mapping as well.
            if len(set(label_names)) == len(label_names):
                kwargs["label2id"] = {name: i for i, name in enumerate(label_names)}
            else:
                kwargs["label2id"] = None

        # timm stores `num_classes` both at the root of the config and in the "pretrained_cfg" dict;
        # remove both in favor of the native transformers `num_labels` attribute.
        num_labels_in_kwargs = kwargs.pop("num_labels", None)
        num_labels_in_dict = config_dict.pop("num_classes", None)

        # An explicitly passed num_labels takes priority over num_classes from the config dict.
        kwargs["num_labels"] = num_labels_in_kwargs or num_labels_in_dict

        if "pretrained_cfg" in config_dict and "num_classes" in config_dict["pretrained_cfg"]:
            config_dict["pretrained_cfg"].pop("num_classes", None)

        return super().from_dict(config_dict, **kwargs)

    def to_dict(self) -> dict[str, Any]:
        output = super().to_dict()
        output["num_classes"] = self.num_labels
        output["label_names"] = list(self.id2label.values())
        output.pop("id2label", None)
        output.pop("label2id", None)
        return output


class Gemma3nConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nForConditionalGeneration`]. It is used to
    instantiate a Gemma3nForConditionalGeneration according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    Gemma3n-E4B.

    e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`Union[Gemma3nTextConfig, dict]`, *optional*):
            The config object of the text backbone.
        vision_config (`Union[Gemma3nVisionConfig, dict]`, *optional*):
            Custom vision config or dict.
        audio_config (`Union[Gemma3nAudioConfig, dict]`, *optional*):
            Custom audio config or dict.
        audio_soft_tokens_per_image (`int`, *optional*, defaults to 188):
            The number of soft tokens per audio clip.
        vision_soft_tokens_per_image (`int`, *optional*, defaults to 256):
            The number of soft tokens per image.
        boi_token_id (`int`, *optional*, defaults to 255999):
            The begin-of-image token index to wrap the image prompt.
        eoi_token_id (`int`, *optional*, defaults to 262144):
            The end-of-image token index to wrap the image prompt.
        image_token_id (`int`, *optional*, defaults to 262145):
            The image token index to encode the image prompt.
        boa_token_id (`int`, *optional*, defaults to 256000):
            The begin-of-audio token index to wrap the audio prompt.
        eoa_token_id (`int`, *optional*, defaults to 262272):
            The end-of-audio token index to wrap the audio prompt.
        audio_token_id (`int`, *optional*, defaults to 262273):
            The audio token index to encode the audio prompt.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

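    Each of `text_config`, `vision_config`, and `audio_config` may also be passed as a plain `dict`; dicts are
    converted to the matching config class, and sub-configs that are omitted fall back to their defaults. A minimal
    sketch:

    ```python
    >>> from transformers import Gemma3nConfig

    >>> configuration = Gemma3nConfig(text_config={"sliding_window": 512})
    >>> type(configuration.text_config).__name__
    'Gemma3nTextConfig'
    ```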

    Example:

    ```python
    >>> from transformers import Gemma3nForConditionalGeneration, Gemma3nConfig, Gemma3nTextConfig

    >>> # Initializing a MobileNet vision config, which is loaded from TIMM
    >>> vision_config = Gemma3nVisionConfig()

    >>> # Initializing a Gemma3n Audio config
    >>> audio_config = Gemma3nAudioConfig()

    >>> # Initializing a Gemma3n Text config
    >>> text_config = Gemma3nTextConfig()

    >>> # Initializing a Gemma3n gemma-3-4b style configuration
    >>> configuration = Gemma3nConfig(text_config, vision_config, audio_config)

    >>> # Initializing a model from the gemma-3-4b style configuration
    >>> model = Gemma3nForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma3n"
    sub_configs = {
        "text_config": Gemma3nTextConfig,
        "vision_config": Gemma3nVisionConfig,
        "audio_config": Gemma3nAudioConfig,
    }

    def __init__(
        self,
        text_config: Optional[Union[Gemma3nTextConfig, dict[str, Any]]] = None,
        vision_config: Optional[Union[Gemma3nVisionConfig, dict[str, Any]]] = None,
        audio_config: Optional[Union[Gemma3nAudioConfig, dict[str, Any]]] = None,
        audio_soft_tokens_per_image: int = 188,
        vision_soft_tokens_per_image: int = 256,
        boi_token_id: int = 255_999,
        eoi_token_id: int = 262_144,
        image_token_id: int = 262_145,
        boa_token_id: int = 256_000,
        eoa_token_id: int = 262_272,
        audio_token_id: int = 262_273,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Sub-configs may arrive as plain dicts; convert them, and fall back to defaults when omitted.
        if isinstance(text_config, dict):
            text_config = Gemma3nTextConfig(**text_config)
        elif text_config is None:
            text_config = Gemma3nTextConfig()
            logger.info("text_config is None. Using default Gemma3nTextConfig.")

        if isinstance(vision_config, dict):
            vision_config = Gemma3nVisionConfig(**vision_config)
        elif vision_config is None:
            vision_config = Gemma3nVisionConfig()
            logger.info("vision_config is None. Using default Gemma3nVisionConfig.")

        if isinstance(audio_config, dict):
            audio_config = Gemma3nAudioConfig(**audio_config)
        elif audio_config is None:
            audio_config = Gemma3nAudioConfig()
            logger.info("audio_config is None. Using default Gemma3nAudioConfig.")

        self.text_config = text_config
        self.vision_config = vision_config
        self.audio_config = audio_config

        self.audio_soft_tokens_per_image = audio_soft_tokens_per_image
        self.vision_soft_tokens_per_image = vision_soft_tokens_per_image
        self.boi_token_id = boi_token_id
        self.eoi_token_id = eoi_token_id
        self.image_token_id = image_token_id
        self.boa_token_id = boa_token_id
        self.eoa_token_id = eoa_token_id
        self.audio_token_id = audio_token_id
        self.initializer_range = initializer_range


__all__ = ["Gemma3nAudioConfig", "Gemma3nConfig", "Gemma3nTextConfig", "Gemma3nVisionConfig"]