
"""
Translate from OpenAI's `/v1/chat/completions` to the LiteLLM proxy's `/v1/chat/completions`.
"""

from typing import TYPE_CHECKING, List, Optional, Tuple

from litellm.secret_managers.main import get_secret_bool, get_secret_str
from litellm.types.router import LiteLLM_Params

from ..openai.chat.gpt_transformation import OpenAIGPTConfig

if TYPE_CHECKING:
    from litellm.types.llms.openai import AllMessageValues

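# Illustrative sketch (not from the original source): the config below mostly passes
# OpenAI-format params straight through to the proxy; the notable exception is
# `thinking`, which is tucked into the request's `extra_body`. Hypothetical values:
#
#   {"thinking": {"type": "enabled"}, "temperature": 0.2}
#       -> {"temperature": 0.2, "extra_body": {"thinking": {"type": "enabled"}}}
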
class LiteLLMProxyChatConfig(OpenAIGPTConfig):
    def get_supported_openai_params(self, model: str) -> list:
        params_list = super().get_supported_openai_params(model)
        params_list.append("thinking")
        params_list.append("reasoning_effort")
        return params_list

    def _map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        supported_openai_params = self.get_supported_openai_params(model)
        for param, value in non_default_params.items():
            if param == "thinking":
                # `thinking` is not a standard OpenAI param; forward it to the proxy
                # via the request's `extra_body`.
                optional_params.setdefault("extra_body", {})["thinking"] = value
            elif param in supported_openai_params:
                optional_params[param] = value
        return optional_params

    def _get_openai_compatible_provider_info(
        self, api_base: Optional[str], api_key: Optional[str]
    ) -> Tuple[Optional[str], Optional[str]]:
        # Fall back to the LiteLLM proxy env vars when values are not passed in.
        api_base = api_base or get_secret_str("LITELLM_PROXY_API_BASE")
        dynamic_api_key = api_key or get_secret_str("LITELLM_PROXY_API_KEY")
        return api_base, dynamic_api_key

    def get_models(
        self, api_key: Optional[str] = None, api_base: Optional[str] = None
    ) -> List[str]:
        api_base, api_key = self._get_openai_compatible_provider_info(api_base, api_key)
        if api_base is None:
            raise ValueError(
                "api_base not set for LiteLLM Proxy route. Set in env via `LITELLM_PROXY_API_BASE`"
            )
        models = super().get_models(api_key=api_key, api_base=api_base)
        return [f"litellm_proxy/{model}" for model in models]

    @staticmethod
    def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
        return api_key or get_secret_str("LITELLM_PROXY_API_KEY")

    @staticmethod
    def _should_use_litellm_proxy_by_default(
        litellm_params: Optional[LiteLLM_Params] = None,
    ) -> bool:
        """
        Returns True if litellm proxy should be used by default for a given request

        Issue: https://github.com/BerriAI/litellm/issues/10559

        Use case:
        - When using Google ADK, users want a flag to dynamically enable sending the request to litellm proxy or not
        - Allow the model name to be passed in original format and still use litellm proxy:
        "gemini/gemini-1.5-pro", "openai/gpt-4", "mistral/llama-2-70b-chat" etc.
        """
        import litellm

        if get_secret_bool("USE_LITELLM_PROXY") is True:
            return True
        if litellm_params and litellm_params.use_litellm_proxy is True:
            return True
        if litellm.use_litellm_proxy is True:
            return True
        return False

    @staticmethod
    def litellm_proxy_get_custom_llm_provider_info(
        model: str, api_base: Optional[str] = None, api_key: Optional[str] = None
    ) -> Tuple[str, str, Optional[str], Optional[str]]:
        """
        Force use litellm proxy for all models

        Issue: https://github.com/BerriAI/litellm/issues/10559

        Expected behavior:
        - custom_llm_provider will be 'litellm_proxy'
        - api_base = api_base OR LITELLM_PROXY_API_BASE
        - api_key = api_key OR LITELLM_PROXY_API_KEY

        Use case:
        - When using Google ADK, users want a flag to dynamically enable sending the request to litellm proxy or not
        - Allow the model name to be passed in original format and still use litellm proxy:
        "gemini/gemini-1.5-pro", "openai/gpt-4", "mistral/llama-2-70b-chat" etc.

        Return model, custom_llm_provider, dynamic_api_key, api_base
        """
        import litellm

        custom_llm_provider = "litellm_proxy"
        # Strip the routing prefix so the proxy receives the original model name.
        if model.startswith("litellm_proxy/"):
            model = model.split("/", 1)[1]

        api_base, dynamic_api_key = litellm.LiteLLMProxyChatConfig()._get_openai_compatible_provider_info(
            api_base=api_base, api_key=api_key
        )

        return model, custom_llm_provider, dynamic_api_key, api_base

    def transform_request(
        self,
        model: str,
        messages: List["AllMessageValues"],
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        # The proxy speaks the OpenAI chat format, so the request passes through
        # unchanged apart from merging in the already-mapped optional params.
        return {"model": model, "messages": messages, **optional_params}

    async def async_transform_request(
        self,
        model: str,
        messages: List["AllMessageValues"],
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        return {"model": model, "messages": messages, **optional_params}
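
# Usage sketch (illustrative, not part of the module). Routing a prefixed model name
# through the provider-info helper; the endpoint and key shown are hypothetical:
#
#   model, provider, key, base = (
#       LiteLLMProxyChatConfig.litellm_proxy_get_custom_llm_provider_info(
#           model="litellm_proxy/gpt-4",
#           api_base="http://localhost:4000",
#           api_key="sk-1234",
#       )
#   )
#   # -> model == "gpt-4", provider == "litellm_proxy",
#   #    key == "sk-1234", base == "http://localhost:4000"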