
import os
import re
import warnings
from collections.abc import Mapping, Sized
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Optional, Union, overload

import numpy as np

from .audio_utils import load_audio_as
from .tokenization_utils_base import (
    LARGE_INTEGER,
    VERY_LARGE_INTEGER,
    BatchEncoding,
    EncodedInput,
    PreTokenizedInput,
    PreTrainedTokenizerBase,
    TextInput,
    TruncationStrategy,
)
from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging, to_py_obj
from .utils.generic import is_torch_tensor
from .utils.hub import PushToHubMixin
from .utils.import_utils import is_mistral_common_available, is_torch_available, requires


if is_mistral_common_available():
    from mistral_common.protocol.instruct.request import ChatCompletionRequest
    from mistral_common.protocol.instruct.validator import ValidationMode
    from mistral_common.tokens.tokenizers.base import SpecialTokenPolicy, TokenizerVersion
    from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
    from mistral_common.tokens.tokenizers.multimodal import MultiModalVersion
    from mistral_common.tokens.tokenizers.tekken import Tekkenizer
    from mistral_common.tokens.tokenizers.utils import download_tokenizer_from_hf_hub

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


ENCODE_KWARGS_DOCSTRING = r"""
            add_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to add special tokens when encoding the sequences. This will use the underlying
                `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are
                automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens
                automatically.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
                  to the maximum acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
                  greater than the model maximum admissible input size).
            max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters.

                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
            stride (`int`, *optional*, defaults to 0):
                If set to a number along with `max_length`, the overflowing tokens returned when
                `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
                returned to provide some overlap between truncated and overflowing sequences. The value of this
                argument defines the number of overlapping tokens.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side (`str`, *optional*):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
"""

ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
                of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
                of returning overflowing tokens.
            return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
                Whether or not to return special tokens mask information.
            return_offsets_mapping (`bool`, *optional*, defaults to `False`):
                Whether or not to return `(char_start, char_end)` for each token.

                This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
                Python's tokenizer, this method will raise `NotImplementedError`.
            return_length  (`bool`, *optional*, defaults to `False`):
                Whether or not to return the lengths of the encoded inputs.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
            **kwargs: passed to the `self.tokenize()` method

        Return:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model.

              [What are input IDs?](../glossary#input-ids)

            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).

              [What are attention masks?](../glossary#attention-mask)

            - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
              regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
            - **length** -- The length of the inputs (when `return_length=True`)
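
            A typical call therefore looks like the following sketch (token ids are illustrative and depend on the
            loaded tokenizer):

            ```python
            encoding = tokenizer("Hello world", return_attention_mask=True)
            encoding["input_ids"]       # e.g. [1, 23325, 2294]
            encoding["attention_mask"]  # [1, 1, 1]
            ```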
c                       e Zd ZdZdZdZy)MistralTokenizerTypez)Enum for the different type of tokenizer.spmtekkenN)__name__


@requires(backends=("mistral-common",))
class MistralCommonTokenizer(PushToHubMixin):
j                  eddd	d
fdeeej                  ef   de
dedededeee      defdZedefd       Zedefd       Zedefd       Zedefd       Zedefd       Zedefd       Zedefd       Zedefd       Zedefd       Zdeeef   fdZ d Z! e"e#dd      	 	 	 	 	 	 	 	 	 	 drd ee$e%f   d!d	d"ed#eeee&f   d$eeee'd	f   d%ee   d&ed'ee   dee   d(eeee(f      d)edee   fd*       Z)	 	 dsd+eeee   d,d-f   d.edee   defd/Z*	 	 dsd0eee   eee      d,d-f   d.edee   dee   fd1Z+d2edefd3Z,e-dtd4ed.edefd5       Z.e-dtd4ee   d.edee   fd6       Z.	 dtd4eeee   f   d.edeeee   f   fd7Z.d8edefd9Z/d:eeee   f   deeee   f   fd;Z0d e$d"edee   fd<Z1d e$dee   fd=Z2de&jf                  e'jh                  d	dd	d	d	d	d
d
d
dfd ee$e%f   d"ed>e&d?e'd%ee   d&ed'ee   dee   d(eeee(f      d@ee   dAedBedCed)ede5fdDZ6de&jf                  e'jh                  d	dd	d	d	d	d
d
d
d
dfdEeee$   ee%   f   d"ed>e&d?e'd%ee   d&ed'ee   dee   d(eeee(f      d@ee   dAedBedFedCed)ede5f dGZ7de8e   fdHZ9	 dudIedJd	dKedee   fdLZ:de&jf                  e'jh                  d	dd	d	d	d	d
d
d
dfdMeee;ee   f      d"ed>e&d?e'd%ee   d&ed'ee   dee   d(ee   d@ee   dAedBedCed)ede5fdNZ< e"e#e=      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dvd4ee   dOd	d"ed#eeee&f   d$eeee'd	f   d%ee   d&ed'ee   dee   d(eeee(f      d@ee   dAedBedCed)edPede5f"dQ       Z>	 	 	 	 	 dwd#eee&ef   d$eeee'ef      d%ee   d'ee   d)ef
dRZ?d	e&jf                  d	d	d	fdSeeee%f   e5f   d%ee   d>e&d'ee   dee   d@ee   defdTZ@	 	 	 	 	 	 	 dxdSee5ee5   eee%f   eeee%   f   eeee%f      f   d#eeee&f   d%ee   d'ee   dee   d@ee   d(eeee(f      d)ede5fdUZA	 	 	 	 dyd4ee   dOd	dVed?eee'f   d&edeBee   d	ee   f   fdWZC	 	 	 	 	 	 	 	 dzdXeeeeef      eeeeef         f   dYeeeeeDf         dZed[ed#eeee&f   d$ed%ee   d(eeee(f      d\edeeee   ee   eee      e5f   fd]ZE e"e#e=      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d{d ee$e%ee$   ee%   d	f   d!d	d^d	d_d	d"ed#eeee&f   d$eeee'd	f   d%ee   d&ed'ee   dee   d(eeee(f      d@ee   dAedBedCed)ede5f$d`       ZFeGe
j                  d	d
d
d	daeddd	d
dbdceeej                  f   de
ddeeeej                  f      deedfedgeeeef      dhededededeee      defdi       ZH	 	 	 	 	 	 	 d|djeeej                  ef   dkedgeeeef      dlee   dmee   dnee   doee   dpee   deBe   fdqZIy	)}MistralCommonTokenizera  
    Class to wrap `mistral-common` tokenizers.

    `mistral-common` is the official tokenizer library for Mistral AI models. To use it, you need to install it with:

    ```bash
    pip install transformers[mistral-common]
    ```

    Otherwise the tokenizer falls back to the Transformers implementation of the tokenizer.

    For more info on `mistral-common`, see [mistral-common](https://github.com/mistralai/mistral-common).

    This class is a wrapper around a `mistral_common.tokens.tokenizers.mistral.MistralTokenizer`.
    It provides a Hugging Face compatible interface to tokenize using the official mistral-common tokenizer.

    Supports the following methods from the `PreTrainedTokenizerBase` class:

    - [`~MistralCommonTokenizer.get_vocab`]: Returns the vocabulary as a dictionary of token to index.
    - [`~MistralCommonTokenizer.encode`]: Encode a string to a list of integers.
    - [`~MistralCommonTokenizer.decode`]: Decode a list of integers to a string.
    - [`~MistralCommonTokenizer.batch_decode`]: Decode a batch of list of integers to a list of strings.
    - [`~MistralCommonTokenizer.convert_tokens_to_ids`]: Convert a list of tokens to a list of integers.
    - [`~MistralCommonTokenizer.convert_ids_to_tokens`]: Convert a list of integers to a list of tokens.
    - [`~MistralCommonTokenizer.tokenize`]: Tokenize a string.
    - [`~MistralCommonTokenizer.get_special_tokens_mask`]: Get the special tokens mask for a list of tokens.
    - [`~MistralCommonTokenizer.prepare_for_model`]: Prepare a list of inputs for the model.
    - [`~MistralCommonTokenizer.pad`]: Pad a list of inputs to the same length.
    - [`~MistralCommonTokenizer.truncate_sequences`]: Truncate a list of sequences to the same length.
    - [`~MistralCommonTokenizer.apply_chat_template`]: Apply a chat template to a list of messages.
    - [`~MistralCommonTokenizer.__call__`]: Tokenize a string or a list of strings.
    - [`~MistralCommonTokenizer.from_pretrained`]: Download and cache a pretrained tokenizer from the Hugging Face model hub or local directory.
    - [`~MistralCommonTokenizer.save_pretrained`]: Save a tokenizer to a directory, so it can be reloaded using the `from_pretrained` class method.
    - [`~MistralCommonTokenizer.push_to_hub`]: Upload tokenizer to the Hugging Face model hub.

    Here are the key differences with the `PreTrainedTokenizerBase` class:

    - Pairs of sequences are not supported. The signatures have been kept for compatibility, but all arguments related to pairs of sequences are ignored. The return values for pairs are `None`.
    - The `is_split_into_words` argument is not supported.
    - The `return_token_type_ids` argument is not supported.
    - It is not possible to add new tokens to the tokenizer. Also the special tokens are handled differently from Transformers. In `mistral-common`, special tokens are never encoded directly. This means that: `tokenizer.encode("<s>")` will not return the ID of the `<s>` token. Instead, it will return a list of IDs corresponding to the tokenization of the string `"<s>"`. For more information, see the [mistral-common documentation](https://mistralai.github.io/mistral-common/usage/tokenizers/#special-tokens).

    If you have suggestions to improve this class, please open an issue on the [mistral-common GitHub repository](https://github.com/mistralai/mistral-common/issues) if it is related to the tokenizer or on the [Transformers GitHub repository](https://github.com/huggingface/transformers/issues) if it is related to the Hugging Face interface.
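
    Example (a minimal sketch; the checkpoint name is illustrative and must provide a `mistral-common` tokenizer
    file such as `tekken.json` or `tokenizer.model.v3`):

    ```python
    from transformers.tokenization_mistral_common import MistralCommonTokenizer

    tokenizer = MistralCommonTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")

    ids = tokenizer("Hello, how are you?")["input_ids"]
    text = tokenizer.decode(ids, skip_special_tokens=True)

    conversation = [{"role": "user", "content": "Hello, how are you?"}]
    prompt_ids = tokenizer.apply_chat_template(conversation)
    ```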
    """

    model_input_names: list[str] = ["input_ids", "attention_mask"]
    padding_side: str = "left"
    truncation_side: str = "right"

    def __init__(
        self,
        tokenizer_path: Union[str, os.PathLike, Path],
        mode: ValidationMode = ValidationMode.test,
        model_max_length: int = VERY_LARGE_INTEGER,
        padding_side: str = "left",
        truncation_side: str = "right",
        model_input_names: Optional[list[str]] = None,
        clean_up_tokenization_spaces: bool = False,
        **kwargs,
    ):
        r"""
        Constructs a `MistralCommonTokenizer`.

        - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
        - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
            Should be `'right'` or `'left'`.
        - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
            applied. Should be `'right'` or `'left'`.

        Args:
            tokenizer_path (`str` or `os.PathLike` or `Path`):
                Path to the tokenizer file to load the `MistralTokenizer`.
            mode (`ValidationMode`, *optional*, defaults to `ValidationMode.test`):
                The mode to use for the tokenizer. This will be passed to the `MistralTokenizer` constructor.
            model_max_length (`int`, *optional*):
                The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
                loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
                value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
                default to VERY_LARGE_INTEGER (`int(1e30)`).
            padding_side (`str`, *optional*):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            truncation_side (`str`, *optional*):
                The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            model_input_names (`List[string]`, *optional*):
                The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
                `"attention_mask"`). Default value is picked from the class attribute of the same name.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not the model should cleanup the spaces that were added when splitting the input text during the
                tokenization process.
        Kwargs z4 are not supported to init `MistralCommonTokenizer`.)r=   Nr   c              3   <   K   | ]  }t        |t                y wN)
isinstancestr).0is     r1   	<genexpr>z2MistralCommonTokenizer.__init__.<locals>.<genexpr>  s     J1Jq#.Js   zV`model_input_names` should be a non-empty list or tuple of str but got an empty value.)
ValueErrorlistkeysr   _tokenizer_pathr$   	from_filerE   	tokenizerrD   instruct_tokenizerr%   r(   r*   r)   _tokenizer_typer;   r9   r>   cleanup_tokenization_spacesdeprecation_warningstuplelenallr7   _cache_get_vocab)	selfr<   r=   r>   r9   r;   r7   r?   kwargss	            r1   __init__zMistralCommonTokenizer.__init__   s   V wtFKKM':&;;opqq#N3+;+E+Ec$J^J^F_fj+k $..;;EEzR !''%)) 	
  /( 0+G($&!(04-@)*a/J8IJJ l  &7D":>r0   returnc                 V    | j                   j                  j                   j                  S )zJ
        Id of the beginning of sentence token in the vocabulary.
        )rN   rO   bos_idrW   s    r1   bos_token_idz#MistralCommonTokenizer.bos_token_id       
 ~~00::AAAr0   c                 V    | j                   j                  j                   j                  S )zD
        Id of the end of sentence token in the vocabulary.
        )rN   rO   eos_idr]   s    r1   eos_token_idz#MistralCommonTokenizer.eos_token_id  r_   r0   c                 V    | j                   j                  j                   j                  S )z<
        Id of the unknown token in the vocabulary.
        )rN   rO   unk_idr]   s    r1   unk_token_idz#MistralCommonTokenizer.unk_token_id&  r_   r0   c                 V    | j                   j                  j                   j                  S )z<
        Id of the padding token in the vocabulary.
        )rN   rO   pad_idr]   s    r1   pad_token_idz#MistralCommonTokenizer.pad_token_id-  r_   r0   c                 8    | j                  | j                        S )zY
        String associated to the beginning of sentence token in the vocabulary.
        )convert_ids_to_tokensr^   r]   s    r1   	bos_tokenz MistralCommonTokenizer.bos_token4      
 ))$*;*;<<r0   c                 8    | j                  | j                        S )zS
        String associated to the end of sentence token in the vocabulary.
        )rj   rb   r]   s    r1   	eos_tokenz MistralCommonTokenizer.eos_token;  rl   r0   c                 8    | j                  | j                        S )zK
        String associated to the unknown token in the vocabulary.
        )rj   re   r]   s    r1   	unk_tokenz MistralCommonTokenizer.unk_tokenB  rl   r0   c                 8    | j                  | j                        S )zK
        String associated to the padding token in the vocabulary.
        )rj   rh   r]   s    r1   	pad_tokenz MistralCommonTokenizer.pad_tokenI  rl   r0   c                 V    | j                   j                  j                   j                  S )z]
        Returns the size of the vocabulary.

        `int`: Size of the vocabulary.
        )rN   rO   n_wordsr]   s    r1   
vocab_sizez!MistralCommonTokenizer.vocab_sizeP  s      ~~00::BBBr0   c                     | j                   Pt        | j                  j                  j                  j	                               D ci c]  \  }}||
 c}}| _         | j                   S c c}}w )u3  
        Returns the vocabulary as a dictionary of token to index.

        This is a lossy conversion. There may be multiple token ids that decode to the same
        string due to partial UTF-8 byte sequences being converted to �.

        Returns:
            `Dict[str, int]`: The vocabulary.
        )rV   	enumeraterN   rO   vocab)rW   idxtokens      r1   	get_vocabz MistralCommonTokenizer.get_vocabY  sa       (-6t~~7X7X7b7b7h7h7j-k%)sEs
%D! $$$%s   A)c                     | j                   S )zD
        Size of the full vocabulary with the added tokens.
        )ru   r]   s    r1   __len__zMistralCommonTokenizer.__len__i  s     r0   z~
            **kwargs: Not supported by `MistralCommonTokenizer.encode`.
                Will raise an error if used.
        zb
        Returns:
            `List[int]`, `torch.Tensor`: The tokenized ids of the text.
        Tr   text	text_pairadd_special_tokenspadding
truncation
max_lengthstridepad_to_multiple_ofreturn_tensorsverbosec                     |r&t        dt        |j                                d      |rt        d      | j                  |||||      \  }}}}| j	                  ||||||||	|
dddd|      }|d   S )a  
        Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.

        Args:
            text (`str` or `List[int]`):
                The first sequence to be encoded. This can be a string or a list of integers (tokenized string ids).
            text_pair (`None`, *optional*):
                Not supported by `MistralCommonTokenizer.encode`. Kept to match `PreTrainedTokenizerBase.encode` signature.
        rA   z6 are not supported by `MistralCommonTokenizer.encode`.z=`MistralCommonTokenizer.encode` does not support `text_pair`.r   r   r   r   r   F)r   padding_strategytruncation_strategyr   r   r   r9   r   return_attention_maskreturn_overflowing_tokensreturn_special_tokens_maskreturn_lengthr   r5   )rI   rJ   rK   "_get_padding_truncation_strategies_encode_plus)rW   r~   r   r   r   r   r   r   r   r9   r   r   rX   r   r   _encoded_inputss                    r1   encodezMistralCommonTokenizer.encodeo  s    F wtFKKM':&;;qrss\]]?C?f?f!!1 @g @
<-z1 **1- 3!1%)"'&+', + 
" k**r0   	token_idsz
np.ndarrayztorch.Tensorskip_special_tokensc                 8   |r&t        dt        |j                                d      |xs | j                  }t	        |      }|rt
        j                  nt
        j                  }| j                  j                  ||      }|rt        j                  |      }|S )a  
        Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
        tokens and clean up tokenization spaces.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Not supported by `MistralCommonTokenizer.decode`.
                Will raise an error if used.

        Returns:
            `str`: The decoded sentence.
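
        Example (illustrative; the exact ids depend on the loaded tokenizer):

        ```python
        ids = tokenizer.encode("Hello world", add_special_tokens=True)
        tokenizer.decode(ids, skip_special_tokens=True)
        # 'Hello world'
        ```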
        rA   z6 are not supported by `MistralCommonTokenizer.decode`.)special_token_policy)rI   rJ   rK   rQ   r   r!   IGNOREKEEPrN   decoder   clean_up_tokenization)rW   r   r   r?   rX   r   decoded_strings          r1   r   zMistralCommonTokenizer.decode  s    4 wtFKKM':&;;qrss'C'gtGgGg$ i(	<O188UgUlUl..yOc.d'4JJ>ZNr0   	sequencesc           	      T    |D cg c]  } | j                   |f||d| c}S c c}w )a  
        Convert a list of lists of token ids into a list of strings by calling decode.

        Args:
            sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Not supported by `MistralCommonTokenizer.batch_decode`.
                Will raise an error if used.

        Returns:
            `List[str]`: The list of decoded sentences.
        )r   r?   )r   )rW   r   r   r?   rX   seqs         r1   batch_decodez#MistralCommonTokenizer.batch_decode  sJ    @ !
  DKK$7-I 	
 	
 
s   %token_idc                 `   | j                   t        j                  k(  r0|| j                  j                  j                  j                         v S | j                   t        j                  k(  r-|| j                  j                  j                  j                  k  S t        d| j                          )NUnknown tokenizer type: )	rP   r(   r)   rN   rO   _control_tokensr*   num_special_tokensrI   )rW   r   s     r1   _is_control_tokenz(MistralCommonTokenizer._is_control_token  s    #7#;#;;t~~@@JJZZ\\\!!%9%@%@@dnn??II\\\\78L8L7MNOOr0   idsc                      y rC   r/   rW   r   r   s      r1   rj   z,MistralCommonTokenizer.convert_ids_to_tokens  s    Y\r0   c                      y rC   r/   r   s      r1   rj   z,MistralCommonTokenizer.convert_ids_to_tokens	  s    ehr0   c                 "   t        |t              rd}|g}nd}g }|D ]T  }| j                  |      r|r|j                  | j                  j
                  j                  j                  |             V |r|g k(  rt        d| d      |d   S |S )a  
        Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
        added tokens.

        Args:
            ids (`int` or `List[int]`):
                The token id (or token ids) to convert to tokens.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.

        Returns:
            `str` or `List[str]`: The decoded token(s).
        TFzInvalid token id .r   )rD   intr   appendrN   rO   id_to_piecerI   )rW   r   r   	one_tokentokensr   s         r1   rj   z,MistralCommonTokenizer.convert_ids_to_tokens  s    " c3I%CI 	]H%%h/4GMM$..;;EEQQRZ[\	]
 | #4SE!;<<!9r0   piecec                    | j                   t        j                  k(  r9| j                  j                  j                  j
                  j                  |      S | j                   t        j                  k(  rj| j                  j                  j                  j
                  j                  |dt                     }t        |      dk(  sJ dt        |              |d   S t        d| j                          )NrU   )allowed_specialdisallowed_special   z Expected to decode 1 token, got r   r   )rP   r(   r)   rN   rO   _modelpiece_to_idr*   r   setrT   rI   )rW   r   piecess      r1   _piece_to_idz#MistralCommonTokenizer._piece_to_id/  s    #7#;#;;>>44>>EEQQRWXX!!%9%@%@@^^66@@GGNNu O F v;!#U'GF}%UU#!978L8L7MNOOr0   r   c                     t        |t              rd}|g}nd}g }|D ]"  }|j                  | j                  |             $ |r|d   S |S )aT  
        Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the
        vocabulary.

        Args:
            tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).

        Returns:
            `int` or `List[int]`: The token id or list of token ids.
        TFr   )rD   rE   r   r   )rW   r   r   r   rz   s        r1   convert_tokens_to_idsz,MistralCommonTokenizer.convert_tokens_to_ids;  s]     fc"IXFI 	1EJJt((/0	1 q6M
r0   c                 j    | j                   j                  j                   j                  |||      }|S )zW
        Converts a string into a sequence of tokens ids, using the tokenizer.
        )boseos)rN   rO   r   )rW   r~   r   
tokens_idss       r1   _text_to_idsz#MistralCommonTokenizer._text_to_idsU  s;     ^^66@@GG(.@ H 

 r0   c                     |r&t        dt        |j                                d      | j                  | j	                  |d      d      S )a  
        Converts a string into a sequence of tokens, using the tokenizer.

        Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies.

        Args:
            text (`str`):
                The sequence to be encoded.
            **kwargs (additional keyword arguments):
                Not supported by `MistralCommonTokenizer.tokenize`.
                Will raise an error if used.

        Returns:
            `List[str]`: The list of tokens.
        rA   z8 are not supported by `MistralCommonTokenizer.tokenize`.F)r   )r   )rI   rJ   rK   rj   r   )rW   r~   rX   s      r1   tokenizezMistralCommonTokenizer.tokenize^  sP      wtFKKM':&;;stuu))$*;*;DUZ*;*[qv)wwr0   r   r   r   r   r   r   c                      |r&t        dt        |j                                d       fd} ||      } j                  ||j                  |j                  |||||	d|
||||      S )NrA   z< are not supported by `MistralCommonTokenizer._encode_plus`.c                     t        | t              rj                  |       S t        | t        t        f      r#t        |       dkD  rt        | d   t              r| S t        d|  d      )Nr   zInput z? is not valid. Should be a string, or a list/tuple of integers.rD   rE   r   rJ   rS   rT   r   rI   r~   r   rW   s    r1   get_input_idsz:MistralCommonTokenizer._encode_plus.<locals>.get_input_ids  sd    $$((/ABBD4-0SY]zRVWXRY[^G_ 6$/n!oppr0   T)r   r   r   r   r   r   r9   r   prepend_batch_axisr   r   r   r   r   )rI   rJ   rK   prepare_for_modelvalue)rW   r~   r   r   r   r   r   r   r9   r   r   r   r   r   r   rX   r   r   s   ` `               r1   r   z#MistralCommonTokenizer._encode_pluss  s    $ $v{{}-..jk 	q D!%%1$***00!1%)#"7&?'A' & 
 	
r0   
batch_textreturn_offsets_mappingc                       fd}|rt        d      g }|D ]  }|j                   ||               j                  ||||||||
||||	|      }t        |      S )Nc                     t        | t              rj                  |       S t        | t        t        f      r#t        |       dkD  rt        | d   t              r| S t        d      )Nr   zCInput is not valid. Should be a string or a list/tuple of integers.r   r   s    r1   r   z@MistralCommonTokenizer._batch_encode_plus.<locals>.get_input_ids  s[    $$((/ABBD4-0SY]zRVWXRY[^G_ !fggr0   zreturn_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.)r   r   r   r   r   r   r9   r   r   r   r   r   r   )NotImplementedErrorr   _batch_prepare_for_modelr   )rW   r   r   r   r   r   r   r   r9   r   r   r   r   r   r   r   rX   r   r5   r   batch_outputss   ` `                  r1   _batch_encode_plusz)MistralCommonTokenizer._batch_encode_plus  s    ,	h "%8  	 	1C]3/0	1 551- 3!1%"7&?'A') 6 
" ]++r0   c                    | j                   t        j                  k(  r;| j                  j                  j                  j
                  D ch c]  }|d   	 c}S | j                   t        j                  k(  r.| j                  j                  j                  j                         S t        d| j                          c c}w )Nrankr   )	rP   r(   r*   rN   rO   _all_special_tokensr)   r   rI   )rW   ts     r1   _all_special_idsz'MistralCommonTokenizer._all_special_ids  s    #7#>#>>'+~~'H'H'R'R'f'fg!AfIgg!!%9%=%==>>44>>NNPP78L8L7MNOO	 hs   
B<token_ids_0token_ids_1already_has_special_tokensc                     |t        d      |rt        d      | j                         }|D cg c]
  }||v rdnd }}|S c c}w )a  
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of ids of the sequence.
            token_ids_1 (`List[int]`, *optional*):
                Not supported by `MistralCommonTokenizer`. Kept to match the interface of `PreTrainedTokenizerBase`.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        zh`token_ids_1` is not supported by `MistralCommonTokenizer` and should be `None`, kept for compatibility.z``already_has_special_tokens` is not supported by `MistralCommonTokenizer` and should be `False`.r   r   )rI   r   )rW   r   r   r   all_special_idsrz   special_tokens_masks          r1   get_special_tokens_maskz.MistralCommonTokenizer.get_special_tokens_mask  sk    $ "z  &r  //1Q\]E_$<q!C]]"" ^s   A	batch_idsc                 b   i }|D ]y  }| j                  ||t        j                  j                  |j                  ||ddd|||dd|      }|j	                         D ]"  \  }}||vrg ||<   ||   j                  |       $ { | j                  ||j                  ||||
      }t        ||	      }|S )a_  
        Prepares a sequence of input id so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.

        Args:
            batch_ids: list of tokenized input ids
        NF)r   r   r   r   r   r   r9   r   r   r   r   r   r   r   )r   r   r   r9   r   tensor_type)r   r   
DO_NOT_PADr   itemsr   padr   )rW   r   r   r   r   r   r   r   r9   r   r   r   r   r   r   r   r   outputskeyr   s                       r1   r   z/MistralCommonTokenizer._batch_prepare_for_model
  s    4  	1C,,#5'2288.44%#'!&+*C+E+##( - G$ &mmo 1
Um+)+M#&c"))%01'	10 $**!1%"7 ! 
 &mPr0   pair_idsr   c                    |t        d      |r&t        dt        |j                                d      | j                  |||||      \  }}}}t	        |      }|d| j
                  v }i }g }|t        j                  k7  r#|r!||kD  r| j                  |||z
  ||      \  }}}|r||d<   ||z
  |d	<   ||| j
                  d
   <   |r*|r| j                  |d      |d<   nd
gt	        |      z  |d<   |t        j                  k7  s|r!| j                  |||j                  ||	|      }|rt	        |d         |d<   t        ||
|      }|S )a&  
        Prepares a sequence of input id so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.

        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence.
            pair_ids (`None`, *optional*):
                Not supported by `MistralCommonTokenizer`. Kept to match the interface of `PreTrainedTokenizerBase`.
        Nze`pair_ids` is not supported by `MistralCommonTokenizer` and should be `None`, kept for compatibility.rA   zA are not supported by `MistralCommonTokenizer.prepare_for_model`.r   r6   )num_tokens_to_remover   r   overflowing_tokensnum_truncated_tokensr   r   )r   r   r   r9   r   r5   length)r   r   )rI   rJ   rK   r   rT   r7   r   DO_NOT_TRUNCATEtruncate_sequencesr   r   r   r   r   r   )rW   r   r   r   r   r   r   r   r   r9   r   r   r   r   r   r   r   rX   r   r   r   len_idsr   r   r   s                            r1   r   z(MistralCommonTokenizer.prepare_for_modelJ  s   @ w  $v{{}-..op  @D?f?f!!1 @g @
<-z1 c( !($48N8N$N!  "4"D"DDX_blXl)-)@)@%,z%9$7	 *A *&C& %3EN/05<z5IN12 58t--a01%!8<8T8TUXZ^8_459:c#h45 999=R!XX%(..#5)&; & N '*>++F'GN8$%K]
 r0   c                    |J|du rF|D|r@| j                   j                  dd      st        j                  d       d| j                   d<   d}|durh|du r5|r"| |	|du s|dk(  rt	        j
                  d       t        j                  }n?t        |t              st        |      }n#t        |t              r|}nt        j                  }|durr|p|du rt        j                  }n.t        |t              st        |      }nt        |t              r|}|t        j                  t        j                  fv rt        d      t        j                  }|
t        j                   k(  rr| j"                  t$        kD  rS|r@| j                   j                  d	d      st        j                  d
       d| j                   d	<   t        j                  }n| j"                  }t        j                  k7  rr| j"                  t$        kD  rS|r@| j                   j                  dd      st        j                  d       d| j                   d<   t        j                  }n| j"                  }t        j                  k7  r&| j&                  | j(                  dk  rt        d      t        j                  k7  r1|t        j                  k7  r||||z  dk7  rt        d| d| d      ||||fS )z?
        Find the correct padding/truncation strategy.
        Fz#Truncation-not-explicitly-activatedat  Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.Tlongest_firstdo_not_truncatez`max_length` is ignored when `padding`=`True` and there is no truncation strategy. To pad to max length, use `padding='max_length'`.zaTruncation strategy `only_first` and `only_second` are not supported by `MistralCommonTokenizer`.zAsking-to-pad-to-max_lengthzAsking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no padding.z Asking-to-truncate-to-max_lengthzAsking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.r   zAsking to pad but the tokenizer does not have a padding token. Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`.zATruncation and padding are both activated but truncation length (z+) is not a multiple of pad_to_multiple_of (z).)rR   getloggerwarningwarningswarnr   LONGESTrD   r   r   LONGEST_FIRST
ONLY_FIRSTONLY_SECONDrI   r   
MAX_LENGTHr>   r   rr   rh   )	rW   r   r   r   r   r   rX   r   r   s	            r1   r   z9MistralCommonTokenizer._get_padding_truncation_strategies  s    !g&6:;M00445Z\abNN) TX))*OP(J %$!-"*jE.AZSdEd P $3#:#: 9#27#; G_5#* .99 U"z'=T!&44 $  
,>?&8&D#J(:;&0#0;;=O=[=[\\ w  #5"D"D ?#=#==((=8#88<<=Z\ab"NN!U TX112OP'6'A'A$!%!6!6J"&8&H&HH((=8#88<<=_afg"NN![ Y]112TU*<*L*L'!%!6!6J 999t~~?UY]YjYjmnYne   #5#E#EE O$>$>>".&00A5&&0\1\]o\pprt 
  !4j&HHr0   r   c                    |d| j                   v }|| j                   d      }|t        j                  k(  rt        |      }||||z  dk7  r||z  dz   |z  }|t        j                  k7  xr t        |      |k7  }|rd|vrdgt        |      z  |d<   |r|t        |      z
  }	||n| j
                  }|dk(  rI|r|d   dg|	z  z   |d<   d|v r|d   dg|	z  z   |d<   || j                  g|	z  z   || j                   d   <   |S |dk(  rI|rdg|	z  |d   z   |d<   d|v rdg|	z  |d   z   |d<   | j                  g|	z  |z   || j                   d   <   |S t        d|       |S )a)  
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in `padding_side` argument:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        r6   r   r   r:   r   r8   zInvalid padding strategy:)r7   r   r   rT   r   r9   rh   rI   )
rW   r   r   r   r   r9   r   required_inputneeds_to_be_padded
differences
             r1   _padzMistralCommonTokenizer._pad   s   H !($48N8N$N!'(>(>q(AB666^,J!&8&D*WiJimnJn%);;q@DVVJ-1K1KKqPSTbPcgqPq !%5^%K01sS5H/HN+,#c.&99J+7+C<IZIZLw&(7EFV7W[\Z]`jZj7jN#34(N:<JK`<aefdgjtdt<tN#89<JdN_N_M`cmMm<mt55a89  '(89sZ7G.YiJj7jN#34(N:=>C*<L~^sOt<tN#89=A=N=N<OR\<\_m<mt55a89  !#<\N!KLLr0   c	           
         t        |t        t        f      r9t        |d   t              r&|d   D 	
ci c]  }	|	|D 
cg c]  }
|
|	   	 c}
 }}	}
| j                  d   |vr5t        d| j                  d    dt        |j                                      || j                  d      }|t        |t              rt        |      dk(  r	|rg |d<   |S |d   }t        |t        t        f      r|D ]  }t        |      dk7  s|d   } n t        |t        t        t        f      stt        |      r|dn|}n<t        |t        j                        r|dn|}nt        d| dt        |       d	      |j                         D ]  \  }	}t        |      ||	<    | j!                  |||
      \  }}}}|| j                  d      }|r=t        |d   t        t        f      s$| j#                  ||||||      }t%        ||      S t        |      t'        fd|j)                         D              sJ d       |t*        j,                  k(  r"t/        d |D              }t*        j0                  }i }t3              D ]t  }|j                         D ci c]  \  }}|||    }}}| j#                  ||||||      }|j                         D ]"  \  }	}|	|vrg ||	<   ||	   j5                  |       $ v t%        ||      S c c}
w c c}
}	w c c}}w )a  
        Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
        in the batch.

        Padding side (left/right) padding token ids are defined at the tokenizer level (with `self.padding_side`,
        `self.pad_token_id`).
        <Tip>

        If the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors, the
        result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
        PyTorch tensors, you will lose the specific device of your tensors however.

        </Tip>

        Args:
            encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):
                Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
                tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
                List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
                collate function.

                Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors), see
                the note above for the return type.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                 Select a strategy to pad the returned sequences (according to the model's padding side and padding
                 index) among:

                - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.

                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side (`str`, *optional*):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
        r   zRYou should supply an encoding or a list of encodings to this method that includes z, but you provided r6   ptnpztype of z
 unknown: zA. Should be one of a python, numpy, pytorch or tensorflow object.)r   r   r   )r   r   r   r9   r   r   c              3   :   K   | ]  }t        |      k(    y wrC   rT   )rF   v
batch_sizes     r1   rH   z-MistralCommonTokenizer.pad.<locals>.<genexpr>  s     IA3q6Z'Is   zLSome items in the output dictionary have a different batch size than others.c              3   2   K   | ]  }t        |        y wrC   r
  )rF   inputss     r1   rH   z-MistralCommonTokenizer.pad.<locals>.<genexpr>  s     FVS[Fs   )rD   rJ   rS   r   r7   rI   rK   r   rT   r   r   r  ndarraytyper   r   r   r  r   rU   valuesr   r   maxr   ranger   )rW   r   r   r   r   r9   r   r   r   r   exampler  first_elementitemr   r   r   r   rG   kr  r  r   r  s                          @r1   r   zMistralCommonTokenizer.padj  sv   V ntUm4NSTDUW^9_[ijk[lmTWc#OWGCL#OOmNm !!!$N:!!%!7!7!: ;;NtTbTgTgTiOjNkm 
 ((>(>q(AB!j&GCP^L_cdLd$35/0!! 'q)mdE]3& t9>$(GM
 -#tU);<}-)7)?^M2::6)7)?^ }oZ]8K7L MV V 
 -224 7
U&/&6s#7 .2-T-T
G .U .
*!Z ((>(>q(AB*^A->u"N!YY%!1#5)&; ' N !^LL(
I1F1F1HII 	
Z	
I 666F~FFJ.99z" 	1A*8*>*>*@A$!Qa1gAFAii%!1#5)&;   G &mmo 1
Um+)+M#&c"))%01	1  ]GGk $PmL Bs   
LK<	LL<Lr   c                    |r&t        dt        |j                                d      |rt        d      |dk  r|dg fS t        |t              st	        |      }|t        j
                  t        j                  fv r.t        dt        j                   dt        j                   d      g }|t        j                  k(  rt        |      |kD  rgt        t        |      ||z         }| j                  d	k(  r|d| }||d }n\| j                  d
k(  r|| d }|d|  }n@t        d| j                   d      d| dt        |       d}	t        j                  |	       |d|fS )ap  
        Truncates a sequence pair in-place following the strategy.

        Args:
            ids (`List[int]`):
                Tokenized input ids. Can be obtained from a string by chaining the `tokenize` and
                `convert_tokens_to_ids` methods.
            pair_ids (`None`, *optional*):
                Not supported by `MistralCommonTokenizer`. Kept to match the signature of `PreTrainedTokenizerBase.truncate_sequences`.
            num_tokens_to_remove (`int`, *optional*, defaults to 0):
                Number of tokens to remove using the truncation strategy.
            truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
                The strategy to follow for truncation. Can be:

                - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided.
                - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
                  than the model maximum admissible input size).
            stride (`int`, *optional*, defaults to 0):
                If set to a positive number, the overflowing tokens returned will contain some tokens from the main
                sequence returned. The value of this argument defines the number of additional tokens.

        Returns:
            `Tuple[List[int], None, List[int]]`: The truncated `ids` and the list of
            overflowing tokens. `None` is returned to match Transformers signature.
        rA   zB are not supported by `MistralCommonTokenizer.truncate_sequences`.zK`pair_ids` is not supported by `MistralCommonTokenizer.truncate_sequences`.r   NzOnly z and z are supported.r8   r:   zinvalid truncation strategy: z, use 'left' or 'right'.zWe need to remove z; to truncate the input but the first sequence has a length z. )rI   rJ   rK   rD   r   r   r   r   r   rT   minr;   r   error)
rW   r   r   r   r   r   rX   r   
window_len	error_msgs
             r1   r   z)MistralCommonTokenizer.truncate_sequences  s   F $v{{}-..pq  jkk1$r?"-/AB"45H"I#5#@#@BTB`B`"aa*889?Q?a?a>bbqr   "4"B"BB3x.. S64H+HI
''61),[j)9&234C))W4),j[\):&4 445C$'DTEYEYDZZr%stt ))=(> ?;;>s8*BH  Y'T-..r0   conversationtoolscontinue_final_messager   return_dictc
                    |
r&t        dt        |
j                                d      t        |t              st        d      t        |t        t
        f      r-t        |d   t        t
        f      st        |d   d      r|}d}n|g}d}dt        t        t        f   d	d
fd}g }g }g }|D ]  }g }|D ]  } ||       |j                  |        t        j                  |||      }| j                  j                  |      }|r|j                  |j                         n|j                  |j                          |j#                  |j$                         |j#                  |j&                  D cg c]  }|j(                   c}        |s|d   }|r | ||||d|      }|	r|rm|dk(  r+t+               st-        d      t/        j0                  |      }n.|dk(  rt3        j4                  |      }n||}nt        d|       ||j6                  d<   |r|t9        d      ||j6                  d<   |S |d   S t:        j=                  d       |S c c}w )aj  
        Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token
        ids.

        Args:
            conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts
                with "role" and "content" keys, representing the chat history so far.
            tools (`List[Union[Dict, Callable]]`, *optional*):
                A list of tools (callable functions) that will be accessible to the model. If the template does not
                support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
                giving the name, description and argument types for the tool. See our
                [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
                for more information.
            continue_final_message (bool, *optional*):
                If this is set, the chat will be formatted so that the final
                message in the chat is open-ended, without any EOS tokens. The model will continue this message
                rather than starting a new one. This allows you to "prefill" part of
                the model's response for it. Cannot be used at the same time as `add_generation_prompt`.
            tokenize (`bool`, defaults to `True`):
                Whether to tokenize the output. If `False`, the output will be a string.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                 Select a strategy to pad the returned sequences (according to the model's padding side and padding
                 index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, defaults to `False`):
                Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
            max_length (`int`, *optional*):
                Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
                not specified, the tokenizer's `max_length` attribute will be used as a default.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
                values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
            return_dict (`bool`, defaults to `False`):
                Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
                If at least one conversation contains an image, its pixel values will be returned in the `pixel_values` key.
            kwargs (additional keyword arguments, *optional*):
                Not supported by `MistralCommonTokenizer.apply_chat_template`.
                Will raise an error if used.

        Returns:
            `Union[str, List[int], List[str], List[List[int]], BatchEncoding]`: A list of token ids representing the tokenized chat so far, including control
            tokens. This output is ready to pass to the model, either directly or via methods like `generate()`.
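
        Example (a minimal sketch; the import path, checkpoint id and chat contents are illustrative):

        ```python
        >>> from transformers.tokenization_mistral_common import MistralCommonTokenizer

        >>> tok = MistralCommonTokenizer.from_pretrained("mistralai/Mistral-Nemo-Instruct-2407")
        >>> chat = [{"role": "user", "content": "Briefly explain what a tokenizer does."}]
        >>> input_ids = tok.apply_chat_template(chat)
        >>> batch = tok.apply_chat_template([chat, chat], return_dict=True, padding=True, return_tensors="pt")
        ```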
        rA   zC are not supported by `MistralCommonTokenizer.apply_chat_template`.z@`truncation` must be a boolean for `apply_chat_template` method.r   messagesTFmessagerZ   Nc                 Z   t        | t              sy| j                  d      }|rt        |t              ryg }|D ]i  }|j                  dd      }|s|dk(  r|j                  d      }|j                  d      }|j                  d      }|r|}nc|r;|j	                  d      s't        |      j                         j                         }|}n&|r|j	                  d	      sd
|z   }|}nt        d      |j                  dd|id       |dk(  r|j                  d      }|j                  d      }|j                  d      }|s|r(t        |xs |dd      }	|j                  d|	d       4|st        d      |j                  dd|id       Y|j                  |       l || d<   y)zRAdapt message to `mistral-common` format and leave validation to `mistral-common`.Ncontentr  imageurlpathbase64zfile://z
data:imagezdata:image/unk;base64,z Image content must be specified.	image_url)r  r*  audiodictT)return_format
force_monoinput_audio)r  r/  z Audio content must be specified.	audio_url)r  r0  )rD   r,  r   rE   
startswithr   resolveas_urirI   r   r   )
r#  maybe_list_contentnormalized_contentr%  content_type	maybe_url
maybe_pathmaybe_base64image_content
audio_datas
             r1   _maybe_adapt_messagezHMistralCommonTokenizer.apply_chat_template.<locals>._maybe_adapt_message  s   gt,dkdodoe &4F)LNP- !7&{{648#!W,/6{{5/AI07F0CJ29++h2GL (1#)44Y?)-j)9)A)A)C)J)J)LJ(2%+66|D+Cl+RL(4()KLL&--{RWYfQg.hi!W,/6{{5/AI07F0CJ29++h2GL J%293J
Z`mq%r
*11=Yc2de '()KLL&--{RWYeQf.gh&--g6C!7D "4GIr0   )r"  r  r  )r   r   r   r   r   r  zMUnable to convert output to PyTorch tensors format, PyTorch is not installed.r  z!Unsupported return_tensors type: pixel_valueszWhen passing audio content in apply_chat_template, `return_tensors` must be None since we cannot batch the audio inputs. The returned audio will be a list of numpy arrays.r+  r5   z`MistralCommonTokenizer.apply_chat_template(..., tokenize=False)` is unsafe and may lead to unexpected behavior. Please consider using `tokenize=True` instead and don't encode the output manually.)rI   rJ   rK   rD   boolrS   hasattrr,  rE   r   r   r   from_openairN   encode_chat_completionr   r~   extendimagesaudiosaudio_arrayr   ImportErrortorchtensorr  arraydatar   r   r   )rW   r  r  r  r   r   r   r   r   r   rX   conversations
is_batchedr<  r   rC  rD  r"  r#  chat_requesttokenized_requesteloutr=  s                           r1   apply_chat_templatez*MistralCommonTokenizer.apply_chat_templateX  s   ~ $v{{}-..qr  *d+_``lT5M2|Au6',q/S]:^(MJ)NMJ-	4$sCx. -	4T -	4^ #%#%) 	OLacH' )$W-() 1<<!'=L !% E El S07780556MM+223MM4E4L4LMb2>>MN%	O( ajG%%#(-C %-13"- o#  (-||F';'4/')xx'7'/'-(+L^L\)]^^/;CHH^,%11 J  )/CHHW%
;'' NNg N_ Ns   2I
text_targettext_pair_targetc                    |r&t        dt        |j                                d      |s|s|rt        d      |dv rt        d      d } ||      st        d      t        |t        t        f      xr t        |d   t
        t        t        f      } | j                  d||||
|d	|\  }}}}|r! | j                  d||||||	|
|||||||d
|S  | j                  d||||||	|
|||||||d|S )a~  
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of ints
                (an already encoded sequence).
            text_pair (`None`, *optional*):
                Not supported by `MistralCommonTokenizer`. Kept to match the signature of `PreTrainedTokenizerBase.__call__`.
            text_target (`None`, *optional*):
                Not supported by `MistralCommonTokenizer`. Kept to match the signature of `PreTrainedTokenizerBase.__call__`.
            text_pair_target (`None`, *optional*):
                Not supported by `MistralCommonTokenizer`. Kept to match the signature of `PreTrainedTokenizerBase.__call__`.
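
        Example (a minimal sketch; the import path and checkpoint id are illustrative):

        ```python
        >>> from transformers.tokenization_mistral_common import MistralCommonTokenizer

        >>> tok = MistralCommonTokenizer.from_pretrained("mistralai/Mistral-Nemo-Instruct-2407")
        >>> single = tok("Hello world!")
        >>> batch = tok(["Hello world!", "How are you?"], padding=True, return_tensors="pt")
        ```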
        rA   z8 are not supported by `MistralCommonTokenizer.__call__`.z``text_pair`, `text_target` and `text_pair_target` are not supported by `MistralCommonTokenizer`.)tfjaxzZ`MistralCommonTokenizer` does not support `return_tensors='tf'` or `return_tensors='jax'`.c                 6   t        | t              ryt        | t        t        f      rrt	        |       dk(  ryt        | d   t        t
        f      ryt        | d   t        t        f      r/t	        | d         dk(  xs t        | d   d   t        t
        f      S yy)NTr   F)rD   rE   rJ   rS   rT   r   )r   s    r1   _is_valid_text_inputz=MistralCommonTokenizer.__call__.<locals>._is_valid_text_inputO  s    !S!Ae}-q6Q;!sCj1!tUm4qt9>LZ!Q#s-LL r0   ztext input must be of type `str` (single example), `List[str]` (batch or single encoded example) or `List[List[int]]` (batch of encoded examples).r   r   )r   r   r   r   r   r   r   r9   r   r   r   r   r   r   )r~   r   r   r   r   r   r   r9   r   r   r   r   r   r   r/   )	rI   rJ   rK   rD   rS   rE   r   r   r   )rW   r~   r   rR  rS  r   r   r   r   r   r   r9   r   r   r   r   r   r   rX   rX  rL  r   r   s                          r1   __call__zMistralCommonTokenizer.__call__  s   J wtFKKM':&;;stuu'7r  ]*l 	( $D)D 
  tUm4`DGcSWY^M_9`
DkDDkDk E
!!1E
 E
A-z6 *4** #5!1$7%#5)-&;*C+E+  $ %4$$ #5!1$7%#5)-&;*C+E+  r0   main)r=   	cache_dirforce_downloadlocal_files_onlyrz   revisionr>   r9   r;   r7   r?   pretrained_model_name_or_pathr[  r\  r]  rz   r^  c          	         |rt        d      |rPt        |j                               j                  ddh      s&t        dt	        |j                                d      t
        j                  j                  |      st        ||||||      }nTg }t	        t        j                        }t	        t        j                        dgz   }|D cg c]  }|D ]	  }d| |   c}}d	gz   }t        j                  |      D ]a  }t        |      }|j                  }dj                  |j                         }|d
k(  r|j#                  |       L||v sQ|j#                  |       c t%        |      dk(  rt        d|       t%        |      dkD  r2d
|v rd
}nt'        |      d   }t(        j+                  d| d| d       n|d   }t
        j                  j                  ||      } | ||||	|
||      S c c}}w )a  
        Instantiate a `MistralCommonTokenizer` from a predefined
        tokenizer.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
                - A path to a *directory* containing the tokenizer config, for instance saved
                  using the [`~MistralCommonTokenizer.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
            mode (`ValidationMode`, *optional*, defaults to `ValidationMode.test`):
                Validation mode for the `MistralTokenizer` tokenizer.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which the downloaded predefined tokenizer vocabulary files should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they
                exist.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `hf auth login` (stored in `~/.huggingface`).
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only rely on local files and not to attempt to download any files.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters.

                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
            padding_side (`str`, *optional*, defaults to `"left"`):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            truncation_side (`str`, *optional*, defaults to `"right"`):
                The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
            model_input_names (`List[str]`, *optional*):
                The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
                `"attention_mask"`). Default value is picked from the class attribute of the same name.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not the model should cleanup the spaces that were added when splitting the input text during the
                tokenization process.
            kwargs (additional keyword arguments, *optional*):
                Not supported by `MistralCommonTokenizer.from_pretrained`.
                Will raise an error if used.
        """
        if init_inputs:
            raise ValueError("`init_inputs` are not supported by `MistralCommonTokenizer.from_pretrained`.")
        # `_from_auto` and `trust_remote_code` may be injected by the Auto* machinery and are simply ignored.
        if kwargs and not set(kwargs.keys()).issubset({"_from_auto", "trust_remote_code"}):
            raise ValueError(
                f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonTokenizer.from_pretrained`."
            )

        if not os.path.isdir(pretrained_model_name_or_path):
            tokenizer_path = download_tokenizer_from_hf_hub(
                repo_id=pretrained_model_name_or_path,
                cache_dir=cache_dir,
                token=token,
                revision=revision,
                force_download=force_download,
                local_files_only=local_files_only,
            )
        else:
            tokenizer_files = []
            instruct_versions = list(TokenizerVersion.__members__)
            mm_versions = list(MultiModalVersion.__members__) + [""]  # allow versions without a multimodal suffix
            sentencepiece_suffixes = [f".model.{v}{m}" for v in instruct_versions for m in mm_versions] + [".model"]

            for path in os.listdir(pretrained_model_name_or_path):
                pathlib_repo_file = Path(path)
                file_name = pathlib_repo_file.name
                suffix = "".join(pathlib_repo_file.suffixes)
                if file_name == "tekken.json" or suffix in sentencepiece_suffixes:
                    tokenizer_files.append(path)

            if len(tokenizer_files) == 0:
                raise ValueError(f"No tokenizer file found in directory: {pretrained_model_name_or_path}")
            if len(tokenizer_files) > 1:
                # Prefer `tekken.json`, otherwise the most recent SentencePiece version.
                tokenizer_file = "tekken.json" if "tekken.json" in tokenizer_files else sorted(tokenizer_files)[-1]
                logger.info(
                    f"Multiple tokenizer files found in directory: {pretrained_model_name_or_path}. "
                    f"Using {tokenizer_file}."
                )
            else:
                tokenizer_file = tokenizer_files[0]
            tokenizer_path = os.path.join(pretrained_model_name_or_path, tokenizer_file)

        return cls(
            tokenizer_path,
            mode=mode,
            max_length=max_length,
            padding_side=padding_side,
            truncation_side=truncation_side,
            model_input_names=model_input_names,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        push_to_hub: bool = False,
        token: Optional[Union[str, bool]] = None,
        commit_message: Optional[str] = None,
        repo_id: Optional[str] = None,
        private: Optional[bool] = None,
        repo_url: Optional[str] = None,
        organization: Optional[str] = None,
        **kwargs,
    ) -> tuple[str]:
        """
        Save the full tokenizer state.


        This method makes sure the full tokenizer can then be re-loaded using the
        [`~MistralCommonTokenizer.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            token (`str` or *bool*, *optional*, defaults to `None`):
                The token to use to push to the model hub. If `True`, will use the token in the `HF_TOKEN` environment
                variable.
            commit_message (`str`, *optional*): The commit message to use when pushing to the hub.
            repo_id (`str`, *optional*): The name of the repository to push to on the Hub.
            private (`bool`, *optional*): Whether the model repository is private or not.
            repo_url (`str`, *optional*): The URL of the Git repository to push to on the Hub.
            organization (`str`, *optional*): The name of the organization in which you would like to push your model.
            kwargs (`Dict[str, Any]`, *optional*):
                Not supported by `MistralCommonTokenizer.save_pretrained`.
                Will raise an error if used.

        Returns:
            A tuple of `str`: The files saved.
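
        Example (a minimal sketch; the import path, checkpoint id and target directory are illustrative):

        ```python
        >>> from transformers.tokenization_mistral_common import MistralCommonTokenizer

        >>> tok = MistralCommonTokenizer.from_pretrained("mistralai/Mistral-Nemo-Instruct-2407")
        >>> saved_files = tok.save_pretrained("./my_model_directory")
        >>> reloaded = MistralCommonTokenizer.from_pretrained("./my_model_directory")
        ```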
        """
        kwargs.pop("save_jinja_files", None)
        if kwargs:
            raise ValueError(
                f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonTokenizer.save_pretrained`."
            )

        save_directory = Path(save_directory)
        save_directory.mkdir(parents=True, exist_ok=True)

        # The whole tokenizer state is a single `mistral-common` file (`tekken.json` or a SentencePiece
        # model), so saving amounts to copying that file into the target directory.
        shutil.copy(self._tokenizer_path, save_directory)

        if push_to_hub:
            repo_id = repo_id or str(save_directory).split(os.path.sep)[-1]
            repo_id = self._create_repo(
                repo_id, token=token, private=private, repo_url=repo_url, organization=organization
            )
            files_timestamps = self._get_files_timestamps(save_directory)
            self._upload_modified_files(
                save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token
            )

        return (str(save_directory / self._tokenizer_path.name),)