
from __future__ import annotations

import logging
import os
from functools import partial
from typing import Any, Callable

import torch
from packaging.version import parse as parse_version
from torch import nn
from transformers import EvalPrediction, PreTrainedTokenizerBase, TrainerCallback
from transformers import __version__ as transformers_version
from transformers.data.data_collator import DataCollator
from transformers.integrations import WandbCallback

from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.data_collator import CrossEncoderDataCollator
from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss, CrossEntropyLoss
from sentence_transformers.cross_encoder.model_card import CrossEncoderModelCardCallback
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
from sentence_transformers.evaluation import SentenceEvaluator, SequentialEvaluator
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.util import is_datasets_available, is_training_available

if is_datasets_available():
    from datasets import Dataset, DatasetDict, IterableDataset

logger = logging.getLogger(__name__)


class CrossEncoderTrainer(SentenceTransformerTrainer):
    """
    CrossEncoderTrainer is a simple but feature-complete training and eval loop for PyTorch
    based on the 🤗 Transformers :class:`~transformers.Trainer`.

    This trainer integrates support for various :class:`transformers.TrainerCallback` subclasses, such as:

    - :class:`~transformers.integrations.WandbCallback` to automatically log training metrics to W&B if `wandb` is installed
    - :class:`~transformers.integrations.TensorBoardCallback` to log training metrics to TensorBoard if `tensorboard` is accessible.
    - :class:`~transformers.integrations.CodeCarbonCallback` to track the carbon emissions of your model during training if `codecarbon` is installed.

        - Note: These carbon emissions will be included in your automatically generated model card.

    See the Transformers `Callbacks <https://huggingface.co/docs/transformers/main/en/main_classes/callback>`_
    documentation for more information on the integrated callbacks and how to write your own callbacks.
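
    For example, a custom callback passed via the ``callbacks`` argument could look like the sketch below. The
    callback class is illustrative only and not part of this library::

        from transformers import TrainerCallback

        class LogEpochCallback(TrainerCallback):
            def on_epoch_end(self, args, state, control, **kwargs):
                # Called by the Trainer at the end of every training epoch
                print(f"Finished epoch {int(state.epoch)}")

        # Pass it to the trainer with: CrossEncoderTrainer(..., callbacks=[LogEpochCallback()])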

    Args:
        model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`, *optional*):
            The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.
        args (:class:`~sentence_transformers.cross_encoder.training_args.CrossEncoderTrainingArguments`, *optional*):
            The arguments to tweak for training. Will default to a basic instance of
            :class:`~sentence_transformers.cross_encoder.training_args.CrossEncoderTrainingArguments` with the
            `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
        train_dataset (Union[:class:`datasets.Dataset`, :class:`datasets.DatasetDict`, :class:`datasets.IterableDataset`, Dict[str, :class:`datasets.Dataset`]], *optional*):
            The dataset to use for training. Must have a format accepted by your loss function, see
            `Training Overview > Dataset Format <../../../docs/sentence_transformer/training_overview.html#dataset-format>`_.
        eval_dataset (Union[:class:`datasets.Dataset`, :class:`datasets.DatasetDict`, :class:`datasets.IterableDataset`, Dict[str, :class:`datasets.Dataset`]], *optional*):
            The dataset to use for evaluation. Must have a format accepted by your loss function, see
            `Training Overview > Dataset Format <../../../docs/sentence_transformer/training_overview.html#dataset-format>`_.
        loss (Optional[Union[:class:`torch.nn.Module`, Dict[str, :class:`torch.nn.Module`],\
            Callable[[:class:`~sentence_transformers.cross_encoder.CrossEncoder`], :class:`torch.nn.Module`],\
            Dict[str, Callable[[:class:`~sentence_transformers.cross_encoder.CrossEncoder`], :class:`torch.nn.Module`]]]], *optional*):
            The loss function to use for training. Can either be a loss class instance, a dictionary mapping
            dataset names to loss class instances, a function that returns a loss class instance given a model,
            or a dictionary mapping dataset names to functions that return a loss class instance given a model.
            In practice, the latter two are primarily used for hyper-parameter optimization. If no ``loss`` is
            provided, it defaults to :class:`~sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss`
            when the model has a single output label, and to
            :class:`~sentence_transformers.cross_encoder.losses.CrossEntropyLoss` otherwise. See the example after
            this ``Args`` section for multi-dataset training with a dictionary of losses.
        evaluator (Union[:class:`~sentence_transformers.evaluation.SentenceEvaluator`,\
            List[:class:`~sentence_transformers.evaluation.SentenceEvaluator`]], *optional*):
            The evaluator instance for useful evaluation metrics during training. You can use an ``evaluator`` with
            or without an ``eval_dataset``, and vice versa. Generally, the metrics that an ``evaluator`` returns
            are more useful than the loss value returned from the ``eval_dataset``. A list of evaluators will be
            wrapped in a :class:`~sentence_transformers.evaluation.SequentialEvaluator` to run them sequentially.
        callbacks (List of [:class:`transformers.TrainerCallback`], *optional*):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in the Transformers `Callbacks documentation <https://huggingface.co/docs/transformers/main/en/main_classes/callback>`_.

            If you want to remove one of the default callbacks used, use the :meth:`transformers.Trainer.remove_callback` method.
        optimizers (`Tuple[:class:`torch.optim.Optimizer`, :class:`torch.optim.lr_scheduler.LambdaLR`]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of :class:`torch.optim.AdamW`
            on your model and a scheduler given by :func:`transformers.get_linear_schedule_with_warmup` controlled by `args`.
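
    Example:
        A minimal training sketch. The base model name and the tiny in-memory dataset below are illustrative
        only; any dataset in a format accepted by the chosen loss works the same way::

            from datasets import Dataset
            from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer
            from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss
            from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments

            model = CrossEncoder("distilroberta-base")
            train_dataset = Dataset.from_dict({
                "sentence1": ["It's nice weather outside today.", "He drove to work."],
                "sentence2": ["It's so sunny.", "He took the car to the office."],
                "label": [1.0, 0.0],
            })
            loss = BinaryCrossEntropyLoss(model)

            trainer = CrossEncoderTrainer(
                model=model,
                args=CrossEncoderTrainingArguments(output_dir="checkpoints"),
                train_dataset=train_dataset,
                loss=loss,
            )
            trainer.train()

        To train on several datasets at once, pass a dictionary (or :class:`~datasets.DatasetDict`) of datasets as
        ``train_dataset`` and a dictionary mapping the same dataset names to loss instances as ``loss``; every key
        of ``train_dataset`` must then also appear in ``loss``.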

    Important attributes:

        - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
          subclass.
        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
          original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
          the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
          model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
          data parallelism, this means some of the model layers are split on different GPUs).
        - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
          to `False` if model parallel or deepspeed is used, or if the default
          `TrainingArguments.place_model_on_device` is overridden to return `False`.
        - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
          in `train`)

    """

    def __init__(
        self,
        model: CrossEncoder | None = None,
        args: CrossEncoderTrainingArguments | None = None,
        train_dataset: Dataset | DatasetDict | dict[str, Dataset] | None = None,
        eval_dataset: Dataset | DatasetDict | dict[str, Dataset] | None = None,
        loss: (
            nn.Module
            | dict[str, nn.Module]
            | Callable[[CrossEncoder], torch.nn.Module]
            | dict[str, Callable[[CrossEncoder], torch.nn.Module]]
            | None
        ) = None,
        evaluator: SentenceEvaluator | list[SentenceEvaluator] | None = None,
        data_collator: DataCollator | None = None,
        tokenizer: PreTrainedTokenizerBase | Callable | None = None,
        model_init: Callable[[], CrossEncoder] | None = None,
        compute_metrics: Callable[[EvalPrediction], dict] | None = None,
        callbacks: list[TrainerCallback] | None = None,
        optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None,
    ) -> None:
        if not is_training_available():
            raise RuntimeError(
                "To train a CrossEncoder model, you need to install the `accelerate` and `datasets` modules. "
                "You can do so with the `train` extra:\n"
                'pip install -U "sentence-transformers[train]"'
            )

        if args is None:
            output_dir = "tmp_trainer"
            logger.info(f"No `CrossEncoderTrainingArguments` passed, using `output_dir={output_dir}`.")
            args = CrossEncoderTrainingArguments(output_dir=output_dir)
        elif not isinstance(args, CrossEncoderTrainingArguments):
            raise ValueError("Please use `CrossEncoderTrainingArguments` imported from `sentence_transformers`.")

        if model is None:
            if model_init is not None:
                self.model_init = model_init
                model = self.call_model_init()
            else:
                raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
        else:
            if model_init is not None:
                logger.warning(
                    "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                    "`model_init` will overwrite your model when calling the `train` method."
                )
            self.model_init = model_init

        if compute_metrics is not None:
            logger.warning(
                "`compute_metrics` is currently not compatible with the CrossEncoderTrainer. Please use the "
                "`evaluator` argument instead for detailed evaluation metrics, or the `eval_dataset` argument for "
                "the evaluation loss."
            )

        # Keep the default training arguments so the model card can report which values were changed
        default_args_dict = CrossEncoderTrainingArguments(output_dir="unused").to_dict()

        # If the model ID is set via the training arguments but not via the model card data, use it for the model card
        if args.hub_model_id and not model.model_card_data.model_id:
            model.model_card_data.set_model_id(args.hub_model_id)

        if tokenizer is None and isinstance(model.tokenizer, PreTrainedTokenizerBase):
            tokenizer = model.tokenizer

        if data_collator is None:
            data_collator = CrossEncoderDataCollator(
                tokenize_fn=partial(tokenizer, padding=True, truncation=True, return_tensors="pt")
            )

        for dataset_name, dataset in zip(["train", "eval"], [train_dataset, eval_dataset]):
            if isinstance(dataset, IterableDataset) or (
                isinstance(dataset, dict) and any(isinstance(d, IterableDataset) for d in dataset.values())
            ):
                raise ValueError(
                    f"CrossEncoderTrainer does not support an IterableDataset for the `{dataset_name}_dataset`. "
                    "Please convert the dataset to a `Dataset` or `DatasetDict` before passing it to the trainer."
                )

        if isinstance(train_dataset, dict) and not isinstance(train_dataset, DatasetDict):
            train_dataset = DatasetDict(train_dataset)
        if isinstance(eval_dataset, dict) and not isinstance(eval_dataset, DatasetDict):
            eval_dataset = DatasetDict(eval_dataset)

        super_kwargs = {
            "model": None if self.model_init else model,
            "args": args,
            "data_collator": data_collator,
            "train_dataset": train_dataset,
            # If an `evaluator` is used without an `eval_dataset`, pass a "dummy" dataset to satisfy the Trainer
            # argument validation; it is reset to None directly after the super().__init__() call below
            "eval_dataset": eval_dataset if eval_dataset is not None or evaluator is None else "dummy",
            "model_init": model_init,
            "compute_metrics": compute_metrics,
            "callbacks": callbacks,
            "optimizers": optimizers,
            "preprocess_logits_for_metrics": preprocess_logits_for_metrics,
        }
        # Transformers v4.46.0 renamed the Trainer's `tokenizer` argument to `processing_class`
        if parse_version(transformers_version) >= parse_version("4.46.0"):
            super_kwargs["processing_class"] = tokenizer
        else:
            super_kwargs["tokenizer"] = tokenizer

        # Fail early with an actionable message instead of letting the Trainer complain later
        if eval_dataset is None and evaluator is None and args.eval_strategy != "no":
            raise ValueError(
                f"You have set `args.eval_strategy` to {args.eval_strategy}, but you didn't provide an `eval_dataset` or "
                "an `evaluator`. Either provide an `eval_dataset` or an `evaluator` to `CrossEncoderTrainer`, "
                "or set `args.eval_strategy='no'` to skip evaluation."
            )

        # Call the transformers Trainer __init__ directly; the SentenceTransformerTrainer __init__ logic is
        # replicated here with the CrossEncoder-specific classes instead
        super(SentenceTransformerTrainer, self).__init__(**super_kwargs)

        # Reset the "dummy" eval_dataset that was only passed to satisfy the Trainer argument validation
        if self.eval_dataset == "dummy":
            self.eval_dataset = None

        self.accum_loss_components = {"train": {}, "eval": {}}
        # Every CrossEncoder model can return a loss, so the Trainer does not need to inspect the forward signature
        self.can_return_loss = True
        self._prompt_length_mapping = {}

        self.model: CrossEncoder
        self.args: CrossEncoderTrainingArguments
        self.data_collator: CrossEncoderDataCollator

        # If the W&B callback is active but no project was configured, default to "sentence-transformers"
        if any(isinstance(callback, WandbCallback) for callback in self.callback_handler.callbacks):
            os.environ.setdefault("WANDB_PROJECT", "sentence-transformers")

        if loss is None:
            if self.model.num_labels == 1:
                logger.info("No `loss` passed, using `losses.BinaryCrossEntropyLoss` as a default option.")
                loss = BinaryCrossEntropyLoss(self.model)
            else:
                logger.info("No `loss` passed, using `losses.CrossEntropyLoss` as a default option.")
                loss = CrossEntropyLoss(self.model)

        if isinstance(loss, dict):
            self.loss = {
                dataset_name: self.prepare_loss(loss_fn, model=model) for dataset_name, loss_fn in loss.items()
            }
            for dataset_name, dataset in zip(["train", "eval"], [train_dataset, eval_dataset]):
                if dataset is None:
                    continue
                if not isinstance(dataset, dict):
                    raise ValueError(
                        f"If the provided `loss` is a dict, then the `{dataset_name}_dataset` must be a `DatasetDict`."
                    )
                if missing := set(dataset.keys()) - set(loss.keys()):
                    raise ValueError(
                        f"If the provided `loss` is a dict, then all keys from the `{dataset_name}_dataset` "
                        f"dictionary must occur in `loss` also. Currently, {sorted(missing)} "
                        f"occur{'s' if len(missing) == 1 else ''} in `{dataset_name}_dataset` but not in `loss`."
                    )
        else:
            self.loss = self.prepare_loss(loss, model=model)

        if evaluator is not None and not isinstance(evaluator, SentenceEvaluator):
            evaluator = SequentialEvaluator(evaluator)
        self.evaluator = evaluator

        # Let the parent trainer add prompt and/or dataset name columns to the datasets where needed
        if self.train_dataset is not None:
            self.train_dataset = self.maybe_add_prompts_or_dataset_name_column(
                train_dataset, prompts=args.prompts, router_mapping=args.router_mapping, dataset_name="train"
            )
        if self.eval_dataset is not None:
            self.eval_dataset = self.maybe_add_prompts_or_dataset_name_column(
                eval_dataset, prompts=args.prompts, router_mapping=args.router_mapping, dataset_name="eval"
            )

        self.add_model_card_callback(default_args_dict)

    def add_model_card_callback(self, default_args_dict: dict[str, Any]) -> None:
        """
        Add a callback responsible for automatically tracking data required for the automatic model card generation

        This method is called in the ``__init__`` method of the
        :class:`~sentence_transformers.trainer.SentenceTransformerTrainer` class.

        Args:
            default_args_dict (Dict[str, Any]): A dictionary of the default training arguments, so we can determine
                which arguments have been changed for the model card.

        .. note::

            This method can be overridden by subclassing the trainer to remove/customize this callback in custom
            use cases.
        """
        model_card_callback = CrossEncoderModelCardCallback(default_args_dict)
        self.add_callback(model_card_callback)
        model_card_callback.on_init_end(self.args, self.state, self.control, model=self.model, trainer=self)

    def collect_features(
        self, inputs: dict[str, torch.Tensor | Any]
    ) -> tuple[list[dict[str, torch.Tensor]], torch.Tensor | None]:
        """Turn the inputs from the dataloader into the separate model inputs & the labels."""
        labels = inputs.pop("label", None)
        features = list(inputs.values())
        return features, labels

    def _load_from_checkpoint(self, checkpoint_path: str) -> None:
        from sentence_transformers.cross_encoder import CrossEncoder

        loaded_model = CrossEncoder(checkpoint_path, trust_remote_code=self.model.trust_remote_code)
        self.model.load_state_dict(loaded_model.state_dict())

    def _load_best_model(self) -> None:
        # Store the step of the best checkpoint in the model card data; this must never break checkpoint loading
        try:
            if checkpoint := self.state.best_model_checkpoint:
                step = checkpoint.rsplit("-", 1)[-1]
                self.model.model_card_data.set_best_model_step(int(step))
        except Exception:
            pass

        # The transformers Trainer can only reload `transformers` models, so temporarily swap in the underlying
        # transformers model, let it load the best checkpoint, and then restore the full CrossEncoder wrapper
        full_model = self.model
        self.model = self.model.model
        try:
            return super()._load_best_model()
        finally:
            loaded_auto_model = self.model
            self.model = full_model
            self.model.model = loaded_auto_model

    def _include_prompt_length(self) -> bool:
        """
        Return whether the prompt length should be passed to the model's forward method.

        This is never the case for CrossEncoder models, as the prompt length is not used in the forward method,
        unlike with Sentence Transformers models, where it may be relevant to mask out the prompt tokens in the
        embedding pooling step.
        """
        return False