
import enum
import warnings
from typing import Any, Union

from ..generation import GenerationConfig
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import Pipeline, build_pipeline_init_args


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class Text2TextGenerationPipeline(Pipeline):
    """
    Pipeline for text to text generation using seq2seq models.

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256
    - num_beams: 4

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
    >>> generator(
    ...     "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
    ... )
    [{'generated_text': 'question: Who created the RuPERTa-base?'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
    generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
    text generation parameters in [Text generation strategies](../generation_strategies) and [Text
    generation](text_generation).

    This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
    identifier: `"text2text-generation"`.

    The models that this pipeline can use are models that have been fine-tuned on a text-to-text generation task. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
    parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Usage:

    ```python
    text2text_generator = pipeline("text2text-generation")
    text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
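    # Any `generate` keyword argument is forwarded to the model; a minimal sketch
    # overriding the defaults documented above (max_new_tokens=256, num_beams=4):
    text2text_generator(
        "question: What is 42 ? context: 42 is the answer to life, the universe and everything",
        max_new_tokens=64,
        num_beams=1,
    )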
    ```"""

    _pipeline_calls_generate = True
    _load_processor = False
    _load_image_processor = False
    _load_feature_extractor = False
    _load_tokenizer = True

    # Keep the defaults documented in the class docstring in sync with this config
    _default_generation_config = GenerationConfig(
        max_new_tokens=256,
        num_beams=4,
    )

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        if self.assistant_model is not None:
            forward_params["assistant_model"] = self.assistant_model
        if self.assistant_tokenizer is not None:
            forward_params["tokenizer"] = self.tokenizer
            forward_params["assistant_tokenizer"] = self.assistant_tokenizer

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with the given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.prefix if self.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f"`args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`."
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(
        self, *args: Union[str, list[str]], **kwargs: Any
    ) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]:
        r"""
        Generate the output text(s) using text(s) given as inputs.

        Args:
            args (`str` or `list[str]`):
                Input text for the encoder.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
                The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
                (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
                max_length instead of throwing an error down the line.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
        """
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        self.check_inputs(
            input_length,
            generate_kwargs.get("min_length", self.generation_config.min_length),
            generate_kwargs.get("max_length", self.generation_config.max_length),
        )

        # A user-defined `generation_config` passed in the pipeline call takes precedence
        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config

        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class SummarizationPipeline(Text2TextGenerationPipeline):
    """
    Summarize news articles and other documents.

    This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"summarization"`.

    The models that this pipeline can use are models that have been fine-tuned on a summarization task, currently
    '*bart-large-cnn*', '*google-t5/t5-small*', '*google-t5/t5-base*', '*google-t5/t5-large*', '*google-t5/t5-3b*' and '*google-t5/t5-11b*'. See the up-to-date
    list of available models on [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list
    of available parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256
    - num_beams: 4

    Usage:

    ```python
    # use bart in pytorch
    summarizer = pipeline("summarization")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)

    # use t5 in tf
    summarizer = pipeline("summarization", model="google-t5/t5-base", tokenizer="google-t5/t5-base", framework="tf")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
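    # return token ids instead of decoded text (sketch): with `return_tensors=True`,
    # each result carries `summary_token_ids` rather than `summary_text`
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20, return_tensors=True)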
    ```"""

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        r"""
        Summarize the text(s) given as inputs.

        Args:
            documents (*str* or `list[str]`):
                One or several articles (or one list of articles) to summarize.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:

            - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input.
            - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the summary.
        """
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """
        Checks whether there might be something wrong with the given input with regard to the model.
        """
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TranslationPipeline(Text2TextGenerationPipeline):
    """
    Translates from one language to another.

    This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"translation_xx_to_yy"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation).
    For a list of available parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256
    - num_beams: 4

    Usage:

    ```python
    en_fr_translator = pipeline("translation_en_to_fr")
    en_fr_translator("How old are you?")
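    # multilingual models (a sketch, assuming the mBART-50 checkpoint below): `src_lang` and
    # `tgt_lang` are forwarded to tokenizers that support them; single-pair models ignore them
    translator = pipeline("translation", model="facebook/mbart-large-50-many-to-many-mmt")
    translator("How old are you?", src_lang="en_XX", tgt_lang="fr_XX")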
    ```"""

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility: deriving the languages from the task name is supported,
            # but passing `src_lang`/`tgt_lang` directly is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to, YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        r"""
        Translate the text(s) given as inputs.

        Args:
            args (`str` or `list[str]`):
                Texts to be translated.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            src_lang (`str`, *optional*):
                The language of the input. Might be required for multilingual models. Will not have any effect for
                single pair translation models
            tgt_lang (`str`, *optional*):
                The language of the desired output. Might be required for multilingual models. Will not have any effect
                for single pair translation models
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:

            - **translation_text** (`str`, present when `return_text=True`) -- The translation.
            - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The
              token ids of the translation.
        """
        return super().__call__(*args, **kwargs)