
"""TimesFM model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimesFmConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`TimesFmModelForPrediction`]. It is used to
    instantiate a TimesFM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the TimesFM
    [google/timesfm-2.0-500m-pytorch](https://huggingface.co/google/timesfm-2.0-500m-pytorch) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        patch_length (`int`, *optional*, defaults to 32):
            The length of one patch in the input sequence.
        context_length (`int`, *optional*, defaults to 512):
            The length of the input context.
        horizon_length (`int`, *optional*, defaults to 128):
            The length of the prediction horizon.
        freq_size (`int`, *optional*, defaults to 3):
            The number of frequency embeddings.
        num_hidden_layers (`int`, *optional*, defaults to 50):
            Number of Transformer layers.
        hidden_size (`int`, *optional*, defaults to 1280):
            Size of the hidden layers in the feed-forward networks.
        intermediate_size (`int`, *optional*, defaults to 1280):
            Dimension of the MLP representations.
        head_dim (`int`, *optional*, defaults to 80):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
            be defined as `num_attention_heads * head_dim` (with the defaults, 16 * 80 = 1280).
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        tolerance (`float`, *optional*, defaults to 1e-06):
            The tolerance for the quantile loss.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the RMS normalization layers.
        quantiles (`list[float]`, *optional*, defaults to `[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]`):
            The quantiles to predict.
        pad_val (`float`, *optional*, defaults to 1123581321.0):
            The value used to pad the predictions.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention scores.
        use_positional_embedding (`bool`, *optional*, defaults to `False`):
            Whether to add positional embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        min_timescale (`int`, *optional*, defaults to 1):
            The start of the geometric positional index. Determines the periodicity of
            the added signal.
        max_timescale (`int`, *optional*, defaults to 10000):
            The end of the geometric positional index. Determines the frequency of the
            added signal.
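
    Example (a minimal usage sketch following the convention of other `PretrainedConfig`
    subclasses; it assumes `TimesFmConfig` and `TimesFmModelForPrediction` are exported
    from the top-level `transformers` package):

    ```python
    >>> from transformers import TimesFmConfig, TimesFmModelForPrediction

    >>> # Initializing a configuration with the default (timesfm-2.0-500m style) values
    >>> configuration = TimesFmConfig()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = TimesFmModelForPrediction(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # Alternatively, load the published configuration from the Hub checkpoint
    >>> configuration = TimesFmConfig.from_pretrained("google/timesfm-2.0-500m-pytorch")
    ```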
    """

    model_type = "timesfm"
    keys_to_ignore_at_inference = []
    is_encoder_decoder = False

    def __init__(
        self,
        patch_length: int = 32,
        context_length: int = 512,
        horizon_length: int = 128,
        freq_size: int = 3,
        num_hidden_layers: int = 50,
        hidden_size: int = 1280,
        intermediate_size: int = 1280,
        head_dim: int = 80,
        num_attention_heads: int = 16,
        tolerance: float = 1e-06,
        rms_norm_eps: float = 1e-06,
        quantiles: list[float] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
        pad_val: float = 1123581321.0,
        attention_dropout: float = 0.0,
        use_positional_embedding: bool = False,
        initializer_range: float = 0.02,
        min_timescale: int = 1,
        max_timescale: int = 10_000,
        **kwargs,
    ):
        self.patch_length = patch_length
        self.context_length = context_length
        self.horizon_length = horizon_length
        self.freq_size = freq_size
        self.num_hidden_layers = num_hidden_layers
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.head_dim = head_dim
        self.num_attention_heads = num_attention_heads
        self.tolerance = tolerance
        self.rms_norm_eps = rms_norm_eps
        self.quantiles = quantiles
        self.pad_val = pad_val
        self.attention_dropout = attention_dropout
        self.use_positional_embedding = use_positional_embedding
        self.initializer_range = initializer_range
        self.min_timescale = min_timescale
        self.max_timescale = max_timescale
        super().__init__(is_encoder_decoder=self.is_encoder_decoder, **kwargs)


__all__ = ["TimesFmConfig"]