from __future__ import annotations

from collections.abc import Iterable, Iterator
from contextlib import nullcontext
from functools import partial
from typing import Any

import torch
import tqdm
from torch import Tensor, nn

from sentence_transformers import SentenceTransformer, util
from sentence_transformers.losses.CachedMultipleNegativesRankingLoss import RandContext
from sentence_transformers.models import StaticEmbedding
from sentence_transformers.util import all_gather_with_grad


def _backward_hook(
    grad_output: Tensor,
    sentence_features: Iterable[dict[str, Tensor]],
    loss_obj: CachedMultipleNegativesSymmetricRankingLoss,
) -> None:
    """A backward hook to backpropagate the cached gradients mini-batch by mini-batch."""
    assert loss_obj.cache is not None
    assert loss_obj.random_states is not None
    with torch.enable_grad():
        for sentence_feature, grad, random_states in zip(sentence_features, loss_obj.cache, loss_obj.random_states):
            for (reps_mb, _), grad_mb in zip(
                loss_obj.embed_minibatch_iter(
                    sentence_feature=sentence_feature,
                    with_grad=True,
                    copy_random_state=False,
                    random_states=random_states,
                ),
                grad,
            ):
                # The dot product of the re-computed embeddings with their cached gradients is a
                # surrogate whose gradient w.r.t. the model parameters equals the true loss
                # gradient (scaled by grad_output), by the chain rule.
                surrogate = torch.dot(reps_mb.flatten(), grad_mb.flatten()) * grad_output
                surrogate.backward()


class CachedMultipleNegativesSymmetricRankingLoss(nn.Module):
    def __init__(
        self,
        model: SentenceTransformer,
        scale: float = 20.0,
        similarity_fct: callable[[Tensor, Tensor], Tensor] = util.cos_sim,
        mini_batch_size: int = 32,
        gather_across_devices: bool = False,
        show_progress_bar: bool = False,
    ) -> None:
        """
        Boosted version of :class:`MultipleNegativesSymmetricRankingLoss` (MNSRL) by GradCache (https://arxiv.org/pdf/2101.06983.pdf).

        Given a list of (anchor, positive) pairs, MNSRL sums the following two losses:

        1. Forward loss: Given an anchor, find the sample with the highest similarity out of all positives in the batch.
        2. Backward loss: Given a positive, find the sample with the highest similarity out of all anchors in the batch.

        For example with question-answer pairs, the forward loss finds the answer for a given question and the backward loss
        finds the question for a given answer. This loss is common in symmetric tasks, such as semantic textual similarity.

        The caching modification allows for large batch sizes (which give a better training signal) with constant memory usage,
        allowing you to reach optimal training signal with regular hardware.

        Note: If you pass triplets, the negative entry will be ignored; the anchor is simply matched against its positive.

        Args:
            model: SentenceTransformer model
            scale: Output of similarity function is multiplied by scale value. In some literature, the scaling parameter
                is referred to as temperature, which is the inverse of the scale. In short: scale = 1 / temperature, so
                scale=20.0 is equivalent to temperature=0.05.
            similarity_fct: similarity function between sentence embeddings. By default, cos_sim. Can also be set to dot
                product (and then set scale to 1)
            mini_batch_size: Mini-batch size for the forward pass. This denotes how much memory is actually used during
                training and evaluation. The smaller the mini-batch size, the more memory-efficient the training is, but
                the slower training becomes. It's recommended to set it as high as your GPU memory allows. The default
                value is 32.
            gather_across_devices: If True, gather the embeddings across all devices before computing the loss.
                Recommended when training on multiple GPUs, as it allows for larger batch sizes, but it may slow down
                training due to communication overhead, and can potentially lead to out-of-memory errors.
            show_progress_bar: If True, a progress bar for the mini-batches is shown during training. The default is False.

        Requirements:
            1. (anchor, positive) pairs
            2. Should be used with large batch sizes for superior performance, but has slower training time than non-cached versions

        Inputs:
            +---------------------------------------+--------+
            | Texts                                 | Labels |
            +=======================================+========+
            | (anchor, positive) pairs              | none   |
            +---------------------------------------+--------+

        Recommendations:
            - Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
              ensure that no in-batch negatives are duplicates of the anchor or positive samples, as in the sketch below.
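
            A minimal sketch of setting the batch sampler (assuming the remaining training arguments keep their
            defaults)::

                from sentence_transformers import SentenceTransformerTrainingArguments
                from sentence_transformers.training_args import BatchSamplers

                args = SentenceTransformerTrainingArguments(
                    output_dir="output",
                    batch_sampler=BatchSamplers.NO_DUPLICATES,
                )
                # then: SentenceTransformerTrainer(model=model, args=args, ...)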

        Relations:
            - Like :class:`MultipleNegativesRankingLoss`, but with an additional symmetric loss term and caching mechanism.
            - Inspired by :class:`CachedMultipleNegativesRankingLoss`, adapted for symmetric loss calculation.

        Example:
            ::

                from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
                from datasets import Dataset

                model = SentenceTransformer("microsoft/mpnet-base")
                train_dataset = Dataset.from_dict({
                    "anchor": ["It's nice weather outside today.", "He drove to work."],
                    "positive": ["It's so sunny.", "He took the car to the office."],
                })
                loss = losses.CachedMultipleNegativesSymmetricRankingLoss(model, mini_batch_size=32)

                trainer = SentenceTransformerTrainer(
                    model=model,
                    train_dataset=train_dataset,
                    loss=loss,
                )
                trainer.train()
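
                # A rough multi-GPU sketch (assumes torch.distributed is initialized, e.g. via
                # `torchrun`): gather_across_devices=True mines in-batch negatives across all
                # ranks, increasing the effective batch size at the cost of communication overhead.
                loss = losses.CachedMultipleNegativesSymmetricRankingLoss(
                    model, mini_batch_size=32, gather_across_devices=True
                )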

        References:
            - Efficient Natural Language Response Suggestion for Smart Reply, Section 4.4: https://arxiv.org/pdf/1705.00652.pdf
            - Scaling Deep Contrastive Learning Batch Size under Memory Limited Setup: https://arxiv.org/pdf/2101.06983.pdf
        """
        super().__init__()
        if isinstance(model[0], StaticEmbedding):
            raise ValueError(
                "CachedMultipleNegativesSymmetricRankingLoss is not compatible with a SentenceTransformer model "
                "based on a StaticEmbedding. Consider using MultipleNegativesSymmetricRankingLoss instead."
            )

        self.model = model
        self.scale = scale
        self.similarity_fct = similarity_fct
        self.mini_batch_size = mini_batch_size
        self.gather_across_devices = gather_across_devices
        self.show_progress_bar = show_progress_bar
        self.cross_entropy_loss = nn.CrossEntropyLoss()
        # Populated during the forward pass; consumed by the backward hook.
        self.cache: list[list[Tensor]] | None = None
        self.random_states: list[list[RandContext]] | None = None

    def embed_minibatch(
        self,
        sentence_feature: dict[str, Tensor],
        begin: int,
        end: int,
        with_grad: bool,
        copy_random_state: bool,
        random_state: RandContext | None = None,
    ) -> tuple[Tensor, RandContext | None]:
        """Embed a mini-batch of sentences."""
        grad_context = nullcontext if with_grad else torch.no_grad
        random_state_context = nullcontext() if random_state is None else random_state
        sentence_feature_minibatch = {k: v[begin:end] for k, v in sentence_feature.items()}
        with random_state_context:
            with grad_context():
                random_state = RandContext(*sentence_feature_minibatch.values()) if copy_random_state else None
                reps = self.model(sentence_feature_minibatch)["sentence_embedding"]
        return reps, random_state

    def embed_minibatch_iter(
        self,
        sentence_feature: dict[str, Tensor],
        with_grad: bool,
        copy_random_state: bool,
        random_states: list[RandContext] | None = None,
    ) -> Iterator[tuple[Tensor, RandContext | None]]:
        """Iterate over mini-batches of sentences for embedding."""
        input_ids: Tensor = sentence_feature["input_ids"]
        batch_size, _ = input_ids.shape
        for i, begin in enumerate(
            tqdm.trange(
                0,
                batch_size,
                self.mini_batch_size,
                desc="Embed mini-batches",
                disable=not self.show_progress_bar,
            )
        ):
            end = begin + self.mini_batch_size
            reps, random_state = self.embed_minibatch(
                sentence_feature=sentence_feature,
                begin=begin,
                end=end,
                with_grad=with_grad,
                copy_random_state=copy_random_state,
                random_state=None if random_states is None else random_states[i],
            )
            yield reps, random_state

    def calculate_loss_and_cache_gradients(self, reps: list[list[Tensor]]) -> Tensor:
        """Calculate the symmetric loss and cache gradients."""
        loss = self.calculate_loss(reps, with_backward=True)
        loss = loss.detach().requires_grad_()

        # Cache the gradients w.r.t. the embeddings; the backward hook replays them
        # through the model mini-batch by mini-batch.
        self.cache = [[r.grad for r in rs] for rs in reps]

        return loss

    def calculate_loss(self, reps: list[list[Tensor]], with_backward: bool = False) -> Tensor:
        """Calculate the symmetric loss without caching gradients (for evaluation)."""
        anchors = torch.cat(reps[0])  # (batch_size, embedding_dim)
        candidates = [torch.cat(r) for r in reps[1:]]  # [(batch_size, embedding_dim)]

        batch_size = len(anchors)
        offset = 0
        if self.gather_across_devices:
            anchors = all_gather_with_grad(anchors)  # (batch_size * world_size, embedding_dim)
            candidates = [all_gather_with_grad(embedding_column) for embedding_column in candidates]
            if torch.distributed.is_initialized():
                rank = torch.distributed.get_rank()
                # Every rank scores against the full gathered batch, but this rank's own
                # pairs start at rank * batch_size.
                offset = rank * batch_size

        candidates = torch.cat(candidates)  # (num_columns * batch_size * world_size, embedding_dim)

        # anchors[i] should be most similar to candidates[i], as that is the paired positive.
        labels = torch.arange(offset, offset + batch_size, device=anchors.device)

        losses: list[torch.Tensor] = []
        for begin in tqdm.trange(
            0,
            batch_size,
            self.mini_batch_size,
            desc="Calculating loss",
            disable=not self.show_progress_bar,
        ):
            end = min(begin + self.mini_batch_size, batch_size)

            # Forward loss: anchors against all candidates.
            forward_scores: Tensor = (
                self.similarity_fct(anchors[offset + begin : offset + end], candidates) * self.scale
            )
            forward_loss: torch.Tensor = self.cross_entropy_loss(forward_scores, labels[begin:end])

            # Backward loss: positives against all anchors.
            backward_scores: Tensor = (
                self.similarity_fct(candidates[offset + begin : offset + end], anchors) * self.scale
            )
            backward_loss: torch.Tensor = self.cross_entropy_loss(backward_scores, labels[begin:end])

            loss_mbatch = (forward_loss + backward_loss) / 2
            # Rescale so that summing the mini-batch losses matches the mean over the full batch.
            loss_mbatch = loss_mbatch * len(forward_scores) / batch_size
            if with_backward:
                loss_mbatch.backward()
                loss_mbatch = loss_mbatch.detach()

            losses.append(loss_mbatch)

        loss = sum(losses)
        return loss

    def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
        """Forward pass of the loss function."""
        reps = []
        self.random_states = []
        for sentence_feature in sentence_features:
            reps_mbs = []
            random_state_mbs = []
            # Step 1: embed without gradients, saving the RNG states so the backward
            # hook can reproduce the exact same forward passes later.
            for reps_mb, random_state in self.embed_minibatch_iter(
                sentence_feature=sentence_feature,
                with_grad=False,
                copy_random_state=True,
            ):
                reps_mbs.append(reps_mb.detach().requires_grad_())
                random_state_mbs.append(random_state)
            reps.append(reps_mbs)
            self.random_states.append(random_state_mbs)

        if torch.is_grad_enabled():
            # Step 2: compute the loss, cache the embedding gradients, and register a hook
            # that backpropagates through the model mini-batch by mini-batch.
            loss = self.calculate_loss_and_cache_gradients(reps)
            loss.register_hook(partial(_backward_hook, sentence_features=sentence_features, loss_obj=self))
        else:
            # Evaluation: no gradients needed, so compute the loss directly.
            loss = self.calculate_loss(reps)

        return loss

    def get_config_dict(self) -> dict[str, Any]:
        """Get the configuration of the loss function."""
        return {
            "scale": self.scale,
            "similarity_fct": self.similarity_fct.__name__,
            "mini_batch_size": self.mini_batch_size,
            "gather_across_devices": self.gather_across_devices,
        }