import pytest

from sklearn import metrics
from sklearn.ensemble import (
    BaggingClassifier,
    BaggingRegressor,
    IsolationForest,
    StackingClassifier,
    StackingRegressor,
)
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc

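# Each case dict below mirrors the keyword arguments of
# `assert_docstring_consistency` and is unpacked directly into that helper:
# the listed objects must document the included parameters/attributes with
# consistent descriptions, or with descriptions matching
# `descr_regex_pattern` when one is given.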
mZ eeegdgdddddddd	eegg d	dd
dgddddgZej                  ej                  ej                  ej                   ej"                  gd
ddgddddddej                  ej                  ej                  ej                   ej"                  gdgddddddj%                  dj'                               dgZe j*                  j-                  de      ed               Ze j*                  j-                  de      ed               Zy)    N)metrics)BaggingClassifierBaggingRegressorIsolationForestStackingClassifierStackingRegressor)assert_docstring_consistencyskip_if_no_numpydocmax_samplesFz4The number of samples to draw from X to train each.*)	objectsinclude_paramsexclude_paramsinclude_attrsexclude_attrsinclude_returnsexclude_returnsdescr_regex_patternignore_types)cvn_jobspassthroughverboseTfinal_estimator_)r   r   r   r   r   r   r   r   averagezero_division a/  This parameter is required for multiclass/multilabel targets\.
            If ``None``, the metrics for each class are returned\. Otherwise, this
            determines the type of averaging performed on the data:
            ``'binary'``:
                Only report results for the class specified by ``pos_label``\.
                This is applicable only if targets \(``y_\{true,pred\}``\) are binary\.
            ``'micro'``:
                Calculate metrics globally by counting the total true positives,
                false negatives and false positives\.
            ``'macro'``:
                Calculate metrics for each label, and find their unweighted
                mean\.  This does not take label imbalance into account\.
            ``'weighted'``:
                Calculate metrics for each label, and find their average weighted
                by support \(the number of true instances for each label\)\. This
                alters 'macro' to account for label imbalance; it can result in an
                F-score that is not between precision and recall\.[\s\w]*\.*
            ``'samples'``:
                Calculate metrics for each instance, and find their average \(only
                meaningful for multilabel classification where this differs from
                :func:`accuracy_score`\)\.""".split()
        ),
        "ignore_types": None,
    },
    {
        "objects": [
            metrics.precision_recall_fscore_support,
            metrics.f1_score,
            metrics.fbeta_score,
            metrics.precision_score,
            metrics.recall_score,
        ],
        "include_params": ["zero_division"],
        "exclude_params": None,
        "include_attrs": False,
        "exclude_attrs": None,
        "include_returns": False,
        "exclude_returns": None,
        "descr_regex_pattern": None,
        "ignore_types": None,
    },
]
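
# `numpydoc` is an optional test-time dependency needed to parse the
# docstrings; `skip_if_no_numpydoc` skips these checks when it is missing.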
@pytest.mark.parametrize("case", CLASS_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_class_docstring_consistency(case):
    """Check docstring parameter consistency between related classes."""
    assert_docstring_consistency(**case)


@pytest.mark.parametrize("case", FUNCTION_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_function_docstring_consistency(case):
    """Check docstring parameter consistency between related functions."""
    assert_docstring_consistency(**case)