from __future__ import annotations

import json
import logging
from pathlib import Path

from transformers.configuration_utils import PretrainedConfig

from sentence_transformers.backend.utils import _save_pretrained_wrapper, backend_should_export, backend_warn_to_save

logger = logging.getLogger(__name__)


def load_onnx_model(model_name_or_path: str, config: PretrainedConfig, task_name: str, **model_kwargs):
    """
    Load and perhaps export an ONNX model using the Optimum library.

    Args:
        model_name_or_path (str): The model name on Hugging Face (e.g. 'naver/splade-cocondenser-ensembledistil')
            or the path to a local model directory.
        config (PretrainedConfig): The model configuration.
        task_name (str): The task name for the model (e.g. 'feature-extraction', 'fill-mask', 'sequence-classification').
        model_kwargs (dict): Additional keyword arguments for the model loading.
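
    Example:
        A minimal usage sketch; the checkpoint, task, and provider below are illustrative, and Optimum
        plus ONNX Runtime must be installed::

            from transformers import AutoConfig

            from sentence_transformers.backend.load import load_onnx_model

            config = AutoConfig.from_pretrained("naver/splade-cocondenser-ensembledistil")
            model = load_onnx_model(
                "naver/splade-cocondenser-ensembledistil",
                config=config,
                task_name="feature-extraction",
                provider="CPUExecutionProvider",  # optional; defaults to the first available provider
            )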
    """
    try:
        import onnxruntime as ort
        from optimum.onnxruntime import (
            ONNX_WEIGHTS_NAME,
            ORTModelForFeatureExtraction,
            ORTModelForMaskedLM,
            ORTModelForSequenceClassification,
        )

        task_to_model_mapping = {
            "feature-extraction": ORTModelForFeatureExtraction,
            "fill-mask": ORTModelForMaskedLM,
            "sequence-classification": ORTModelForSequenceClassification,
        }
        if task_name not in task_to_model_mapping:
            supported_tasks = ", ".join(task_to_model_mapping.keys())
            raise ValueError(f"Unsupported task: {task_name}. Supported tasks: {supported_tasks}")
        model_cls = task_to_model_mapping[task_name]
    except ModuleNotFoundError:
        raise Exception(
            "Using the ONNX backend requires installing Optimum and ONNX Runtime. You can install them with pip: "
            "`pip install optimum[onnxruntime]` or `pip install optimum[onnxruntime-gpu]`"
        )

    # Default to the first (highest-priority) available execution provider unless one was passed explicitly
    model_kwargs["provider"] = model_kwargs.pop("provider", ort.get_available_providers()[0])

    load_path = Path(model_name_or_path)
    is_local = load_path.exists()
    backend_name = "ONNX"
    target_file_glob = "*.onnx"

    # Determine whether the model must be exported to ONNX or can be loaded as-is
    export, model_kwargs = backend_should_export(
        load_path, is_local, model_kwargs, ONNX_WEIGHTS_NAME, target_file_glob, backend_name
    )

    # If we're exporting, there is no existing ONNX file to select via `file_name`
    if export:
        model_kwargs.pop("file_name", None)

    # Either load the exported model, or export the model to ONNX on the fly
    model = model_cls.from_pretrained(model_name_or_path, config=config, export=export, **model_kwargs)

    # Wrap `_save_pretrained` so that the ONNX files end up in an "onnx" subfolder
    model._save_pretrained = _save_pretrained_wrapper(model._save_pretrained, subfolder="onnx")

    # Warn the user to save the exported model so it doesn't have to be re-exported on every load
    if export:
        backend_warn_to_save(model_name_or_path, is_local, backend_name)

    return model


def load_openvino_model(model_name_or_path: str, config: PretrainedConfig, task_name: str, **model_kwargs):
    """
    Load and perhaps export an OpenVINO model using the Optimum library.

    Args:
        model_name_or_path (str): The model name on Hugging Face (e.g. 'naver/splade-cocondenser-ensembledistil')
            or the path to a local model directory.
        config (PretrainedConfig): The model configuration.
        task_name (str): The task name for the model (e.g. 'feature-extraction', 'fill-mask', 'sequence-classification').
        model_kwargs (dict): Additional keyword arguments for the model loading.
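
    Example:
        A minimal usage sketch; the checkpoint and ``ov_config`` values below are illustrative, and
        Optimum Intel with OpenVINO support must be installed::

            from transformers import AutoConfig

            from sentence_transformers.backend.load import load_openvino_model

            config = AutoConfig.from_pretrained("naver/splade-cocondenser-ensembledistil")
            model = load_openvino_model(
                "naver/splade-cocondenser-ensembledistil",
                config=config,
                task_name="feature-extraction",
                ov_config={"CACHE_DIR": ""},  # or a path to a .json file with the same content
            )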
    """
    try:
        from optimum.intel.openvino import (
            OV_XML_FILE_NAME,
            OVModelForFeatureExtraction,
            OVModelForMaskedLM,
            OVModelForSequenceClassification,
        )

        task_to_model_mapping = {
            "feature-extraction": OVModelForFeatureExtraction,
            "fill-mask": OVModelForMaskedLM,
            "sequence-classification": OVModelForSequenceClassification,
        }
        if task_name not in task_to_model_mapping:
            supported_tasks = ", ".join(task_to_model_mapping.keys())
            raise ValueError(f"Unsupported task: {task_name}. Supported tasks: {supported_tasks}")
        model_cls = task_to_model_mapping[task_name]
    except ModuleNotFoundError:
        raise Exception(
            "Using the OpenVINO backend requires installing Optimum and OpenVINO. You can install them with pip: "
            "`pip install optimum[openvino]`"
        )

    load_path = Path(model_name_or_path)
    is_local = load_path.exists()
    backend_name = "OpenVINO"
    target_file_glob = "openvino*.xml"

    # Determine whether the model must be exported to OpenVINO or can be loaded as-is
    export, model_kwargs = backend_should_export(
        load_path, is_local, model_kwargs, OV_XML_FILE_NAME, target_file_glob, backend_name
    )

    # If we're exporting, there is no existing OpenVINO file to select via `file_name`
    if export:
        model_kwargs.pop("file_name", None)

    # `ov_config` may be passed as a dictionary or as a path to a .json file with an OpenVINO config
    if "ov_config" in model_kwargs:
        ov_config = model_kwargs["ov_config"]
        if not isinstance(ov_config, dict):
            if not Path(ov_config).exists():
                raise ValueError(
                    "ov_config should be a dictionary or a path to a .json file containing an OpenVINO config"
                )
            with open(ov_config, encoding="utf-8") as f:
                model_kwargs["ov_config"] = json.load(f)
    else:
        model_kwargs["ov_config"] = {}

    # Either load the exported model, or export the model to OpenVINO on the fly
    model = model_cls.from_pretrained(model_name_or_path, config=config, export=export, **model_kwargs)

    # Wrap `_save_pretrained` so that the OpenVINO files end up in an "openvino" subfolder
    model._save_pretrained = _save_pretrained_wrapper(model._save_pretrained, subfolder="openvino")

    # Warn the user to save the exported model so it doesn't have to be re-exported on every load
    if export:
        backend_warn_to_save(model_name_or_path, is_local, backend_name)

    return model