
"""
Common base config for all LLM providers
"""

import types
from abc import ABC, abstractmethod
from typing import (
    TYPE_CHECKING,
    Any,
    AsyncIterator,
    Iterator,
    List,
    Optional,
    Tuple,
    Type,
    Union,
    cast,
)

import httpx
from pydantic import BaseModel

from litellm.constants import DEFAULT_MAX_TOKENS, RESPONSE_FORMAT_TOOL_NAME
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.types.llms.openai import (
    AllMessageValues,
    ChatCompletionToolChoiceFunctionParam,
    ChatCompletionToolChoiceObjectParam,
    ChatCompletionToolParam,
    ChatCompletionToolParamFunctionChunk,
)

if TYPE_CHECKING:
    from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper
    from litellm.types.utils import ModelResponse

from ..base_utils import (
    map_developer_role_to_system_role,
    type_to_response_format_param,
)

if TYPE_CHECKING:
    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj

    LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
    LiteLLMLoggingObj = Any


class BaseLLMException(Exception):
    def __init__(
        self,
        status_code: int,
        message: str,
        headers: Optional[Union[dict, httpx.Headers]] = None,
        request: Optional[httpx.Request] = None,
        response: Optional[httpx.Response] = None,
        body: Optional[dict] = None,
    ):
        self.status_code = status_code
        self.message = message
        self.headers = headers
        if request:
            self.request = request
        else:
            # Fall back to a placeholder request so error handling always has one to inspect.
            self.request = httpx.Request(
                method="POST", url="https://docs.litellm.ai/docs"
            )
        if response:
            self.response = response
        else:
            self.response = httpx.Response(
                status_code=status_code, request=self.request
            )
        self.body = body
        super().__init__(self.message)


class BaseConfig(ABC):
    def __init__(self):
        pass

    @classmethod
    def get_config(cls):
        # Expose class-level config values, skipping dunder/ABC internals and callables.
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not k.startswith("_abc")
            and not k.startswith("_is_base_class")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                    property,
                ),
            )
        }

    def get_json_schema_from_pydantic_object(
        self, response_format: Optional[Union[Type[BaseModel], dict]]
    ) -> Optional[dict]:
        return type_to_response_format_param(response_format=response_format)

    def is_thinking_enabled(self, non_default_params: dict) -> bool:
        return (
            non_default_params.get("thinking", {}).get("type") == "enabled"
            or non_default_params.get("reasoning_effort") is not None
        )

    def is_max_tokens_in_request(self, non_default_params: dict) -> bool:
        """
        OpenAI spec allows max_tokens or max_completion_tokens to be specified.
        """
        return (
            "max_tokens" in non_default_params
            or "max_completion_tokens" in non_default_params
        )
    def update_optional_params_with_thinking_tokens(
        self, non_default_params: dict, optional_params: dict
    ):
        """
        Handles scenario where max tokens is not specified. For anthropic models (anthropic api/bedrock/vertex ai), this requires having the max tokens being set and being greater than the thinking token budget.

        Checks 'non_default_params' for 'thinking' and 'max_tokens'

        if 'thinking' is enabled and 'max_tokens' is not specified, set 'max_tokens' to the thinking token budget + DEFAULT_MAX_TOKENS
        """
        is_thinking_enabled = self.is_thinking_enabled(non_default_params)
        if is_thinking_enabled and "max_tokens" not in non_default_params:
            thinking_token_budget = cast(dict, non_default_params["thinking"]).get(
                "budget_tokens", None
            )
            if thinking_token_budget is not None:
                optional_params["max_tokens"] = (
                    thinking_token_budget + DEFAULT_MAX_TOKENS
                )
    def should_fake_stream(
        self,
        model: Optional[str],
        stream: Optional[bool],
        custom_llm_provider: Optional[str] = None,
    ) -> bool:
        """
        Returns True if the model/provider should fake stream
        """
        return False

    def _add_tools_to_optional_params(
        self, optional_params: dict, tools: List[ChatCompletionToolParam]
    ) -> dict:
        """
        Helper util to add tools to optional_params.
        """
        if "tools" not in optional_params:
            optional_params["tools"] = tools
        else:
            optional_params["tools"] = [*optional_params["tools"], *tools]
        return optional_params
    def translate_developer_role_to_system_role(
        self, messages: List[AllMessageValues]
    ) -> List[AllMessageValues]:
        """
        Translate `developer` role to `system` role for non-OpenAI providers.

        Overridden by OpenAI/Azure
        """
        return map_developer_role_to_system_role(messages=messages)
    def should_retry_llm_api_inside_llm_translation_on_http_error(
        self, e: httpx.HTTPStatusError, litellm_params: dict
    ) -> bool:
        """
        Returns True if the model/provider should retry the LLM API on UnprocessableEntityError

        Overridden by azure ai - where different models support different parameters
        """
        return False

    def transform_request_on_unprocessable_entity_error(
        self, e: httpx.HTTPStatusError, request_data: dict
    ) -> dict:
        """
        Transform the request data on UnprocessableEntityError
        """
        return request_data
    @property
    def max_retry_on_unprocessable_entity_error(self) -> int:
        """
        Returns the max retry count for UnprocessableEntityError

        Used if `should_retry_llm_api_inside_llm_translation_on_http_error` is True
        """
        return 0

    @abstractmethod
    def get_supported_openai_params(self, model: str) -> list:
        pass
    def _add_response_format_to_tools(
        self,
        optional_params: dict,
        value: dict,
        is_response_format_supported: bool,
        enforce_tool_choice: bool = True,
    ) -> dict:
        """
        Follow similar approach to anthropic - translate to a single tool call.

        When using tools in this way: - https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode
        - You usually want to provide a single tool
        - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
        - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model's perspective.

        Add response format to tools

        This is used to translate response_format to a tool call, for models/APIs that don't support response_format directly.
        """
        json_schema: Optional[dict] = None
        if "response_schema" in value:
            json_schema = value["response_schema"]
        elif "json_schema" in value:
            json_schema = value["json_schema"]["schema"]

        if json_schema and not is_response_format_supported:
            _tool_choice = ChatCompletionToolChoiceObjectParam(
                type="function",
                function=ChatCompletionToolChoiceFunctionParam(
                    name=RESPONSE_FORMAT_TOOL_NAME
                ),
            )
            _tool = ChatCompletionToolParam(
                type="function",
                function=ChatCompletionToolParamFunctionChunk(
                    name=RESPONSE_FORMAT_TOOL_NAME, parameters=json_schema
                ),
            )

            optional_params.setdefault("tools", [])
            optional_params["tools"].append(_tool)
            if enforce_tool_choice:
                optional_params["tool_choice"] = _tool_choice
            optional_params["json_mode"] = True
        elif is_response_format_supported:
            optional_params["response_format"] = value
        return optional_params
    @abstractmethod
    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        pass

    @abstractmethod
    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        pass

    def sign_request(
        self,
        headers: dict,
        optional_params: dict,
        request_data: dict,
        api_base: str,
        api_key: Optional[str] = None,
        model: Optional[str] = None,
        stream: Optional[bool] = None,
        fake_stream: Optional[bool] = None,
    ) -> Tuple[dict, Optional[bytes]]:
        """
        Some providers like Bedrock require signing the request. The sign request function needs access to `request_data` and `complete_url`
        Args:
            headers: dict
            optional_params: dict
            request_data: dict - the request body being sent in http request
            api_base: str - the complete url being sent in http request
        Returns:
            dict - the signed headers

        Update the headers with the signed headers in this function. The return values will be sent as headers in the http request.
        """
        return headers, None
    def get_complete_url(
        self,
        api_base: Optional[str],
        api_key: Optional[str],
        model: str,
        optional_params: dict,
        litellm_params: dict,
        stream: Optional[bool] = None,
    ) -> str:
        """
        OPTIONAL

        Get the complete url for the request

        Some providers need `model` in `api_base`
        """
        if api_base is None:
            raise ValueError("api_base is required")
        return api_base
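    # Rough call order as driven by the HTTP handlers (an orientation sketch, not a
    # strict contract): validate_environment() builds headers -> get_complete_url()
    # resolves the endpoint -> transform_request()/async_transform_request() build the
    # provider request body -> sign_request() optionally signs it -> transform_response()
    # maps the provider reply back into a ModelResponse.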
    @abstractmethod
    def transform_request(
        self,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        pass

    async def async_transform_request(
        self,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        """
        Override to allow for http requests on async calls - e.g. converting url to base64

        Currently only used by openai.py
        """
        return self.transform_request(
            model=model,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
            headers=headers,
        )

    @abstractmethod
    def transform_response(
        self,
        model: str,
        raw_response: httpx.Response,
        model_response: "ModelResponse",
        logging_obj: LiteLLMLoggingObj,
        request_data: dict,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        encoding: Any,
        api_key: Optional[str] = None,
        json_mode: Optional[bool] = None,
    ) -> "ModelResponse":
        pass

    @abstractmethod
    def get_error_class(
        self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
    ) -> BaseLLMException:
        pass

    def get_model_response_iterator(
        self,
        streaming_response: Union[Iterator[str], AsyncIterator[str], "ModelResponse"],
        sync_stream: bool,
        json_mode: Optional[bool] = False,
    ) -> Any:
        pass

    def get_async_custom_stream_wrapper(
        self,
        model: str,
        custom_llm_provider: str,
        logging_obj: LiteLLMLoggingObj,
        api_base: str,
        headers: dict,
        data: dict,
        messages: list,
        client: Optional[AsyncHTTPHandler] = None,
        json_mode: Optional[bool] = None,
        signed_json_body: Optional[bytes] = None,
    ) -> "CustomStreamWrapper":
        raise NotImplementedError

    def get_sync_custom_stream_wrapper(
        self,
        model: str,
        custom_llm_provider: str,
        logging_obj: LiteLLMLoggingObj,
        api_base: str,
        headers: dict,
        data: dict,
        messages: list,
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
        json_mode: Optional[bool] = None,
        signed_json_body: Optional[bytes] = None,
    ) -> "CustomStreamWrapper":
        raise NotImplementedError

    @property
    def custom_llm_provider(self) -> Optional[str]:
        return None

    @property
    def has_custom_stream_wrapper(self) -> bool:
        return False
    @property
    def supports_stream_param_in_request_body(self) -> bool:
        """
        Some providers like Bedrock invoke do not support the stream parameter in the request body.

        By default, this is true for almost all providers.
        """
        return True