
"""
Support for gpt model family
"""

from typing import List, Optional, Union

from litellm.llms.base_llm.completion.transformation import BaseTextCompletionConfig
from litellm.types.llms.openai import AllMessageValues, OpenAITextCompletionUserMessage
from litellm.types.utils import Choices, Message, ModelResponse, TextCompletionResponse

from ..chat.gpt_transformation import OpenAIGPTConfig
from .utils import _transform_prompt


class OpenAITextCompletionConfig(BaseTextCompletionConfig, OpenAIGPTConfig):
    """
    Reference: https://platform.openai.com/docs/api-reference/completions/create

    The class `OpenAITextCompletionConfig` provides configuration for OpenAI's text completion API interface. Below are the parameters:

    - `best_of` (integer or null): This optional parameter generates server-side completions and returns the one with the highest log probability per token.

    - `echo` (boolean or null): This optional parameter will echo back the prompt in addition to the completion.

    - `frequency_penalty` (number or null): Defaults to 0. It is a number from -2.0 to 2.0, where positive values decrease the model's likelihood to repeat the same line.

    - `logit_bias` (map): This optional parameter modifies the likelihood of specified tokens appearing in the completion.

    - `logprobs` (integer or null): This optional parameter includes the log probabilities on the most likely tokens as well as the chosen tokens.

    - `max_tokens` (integer or null): This optional parameter sets the maximum number of tokens to generate in the completion.

    - `n` (integer or null): This optional parameter sets how many completions to generate for each prompt.

    - `presence_penalty` (number or null): Defaults to 0 and can be between -2.0 and 2.0. Positive values increase the model's likelihood to talk about new topics.

    - `stop` (string / array / null): Specifies up to 4 sequences where the API will stop generating further tokens.

    - `suffix` (string or null): Defines the suffix that comes after a completion of inserted text.

    - `temperature` (number or null): This optional parameter defines the sampling temperature to use.

    - `top_p` (number or null): An alternative to sampling with temperature, used for nucleus sampling.
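
    Example (illustrative sketch; the parameter values below are made up):

        # Values set on the config become class-level defaults that later
        # calls read back via `OpenAITextCompletionConfig.get_config()`.
        OpenAITextCompletionConfig(max_tokens=256, temperature=0.2)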
    Nbest_ofechofrequency_penalty
logit_biaslogprobs
max_tokensnpresence_penaltystopsuffixtemperaturetop_preturnc                     t               j                         }|j                         D ]%  \  }}|dk7  s|t        | j                  ||       ' y )Nself)localscopyitemssetattr	__class__)r"   r   r   r   r   r   r   r   r   r   r   r   r   locals_keyvalues                   i/var/www/Befach/backend/env/lib/python3.12/site-packages/litellm/llms/openai/completion/transformation.py__init__z#OpenAITextCompletionConfig.__init__9   sA     (--/!--/JCf}!2U3 *    c                      t         |          S )N)super
get_config)clsr'   s    r+   r0   z%OpenAITextCompletionConfig.get_configM   s    w!##r-   response_objectmodel_response_objectc           
         	 ||t        d      g }t        |d         D ]H  \  }}t        |d   d      }t        |d   |||j	                  dd             }|j                  |       J ||_        d	|v rt        |d	|d	          d
|v r
|d
   |_        d|v r
|d   |_	        ||j                  d<   |S # t        $ r}|d }~ww xY w)NzError in response object formatchoicestext	assistant)contentrolefinish_reasonr   )r:   indexmessager   usageidmodeloriginal_response)
ValueError	enumerater
   r	   getappendr5   r&   r>   r?   _hidden_params	Exception)r"   r2   r3   choice_listidxchoicer<   es           r+   %convert_to_chat_model_response_objectz@OpenAITextCompletionConfig.convert_to_chat_model_response_objectQ   s   
!	&*?*G !BCCK()CDV!"6N$ !"("9##ZZ
D9	 ""6*  E -8!)/)-w8PQ&+:4+@%(/).=g.F%+   "00# )( 	G	s   B1B4 4	C=B??Cr?   c                 
    g dS )N)	functionsfunction_callr   r   r   streamstream_optionsr   r   r   r   r   userresponse_formatseedtoolstool_choicemax_retriesr   top_logprobsextra_headers )r"   r?   s     r+   get_supported_openai_paramsz6OpenAITextCompletionConfig.get_supported_openai_paramsy   s    
 	
r-   messagesoptional_paramsheadersc                 &    t        |      }||d|S )N)r?   promptr   )r"   r?   r[   r\   r]   r_   s         r+   !transform_text_completion_requestz<OpenAITextCompletionConfig.transform_text_completion_request   s(     #8,
 
 	
r-   )NNNNNNNNNNNN)NN)#__name__
__module____qualname____doc__r   r   int__annotations__r   boolr   r   dictr   r   r   r   r   r   strlistr   floatr,   classmethodr0   r   r   rK   r   rZ   r   r   r`   __classcell__)r'   s   @r+   r   r      s%   < "GXc]!D(4.'+x}+!%J%"Hhsm" $J$Ax}&*hsm*'+D(5d#
$+ FHSM  "&#+/%)"&$(*.+/ $'+!%4#4 tn4 $C=	4
 TN4 3-4 SM4 C=4 #3-4 uS$Y'(4 4 e_4 4 
4( $ $
 =A9=&!"89&  (6&P
 
 
2

 -.5T0UUV
 	

 
 

r-   r   N)rd   typingr   r   r   /litellm.llms.base_llm.completion.transformationr   litellm.types.llms.openair   r   litellm.types.utilsr	   r
   r   r   chat.gpt_transformationr   utilsr   r   rY   r-   r+   <module>rt      s5    ) ( T W W W 5 $O
!9? O
r-   
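

# ---------------------------------------------------------------------------
# Illustrative usage sketch: the model name, message, and parameter values
# below are assumptions chosen for demonstration only. It shows how chat-style
# messages are collapsed into a text-completion request body by
# `transform_text_completion_request`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = OpenAITextCompletionConfig()

    # OpenAI params this config advertises for text-completion models.
    print(config.get_supported_openai_params(model="gpt-3.5-turbo-instruct"))

    # Build the raw payload for the /v1/completions endpoint.
    request_body = config.transform_text_completion_request(
        model="gpt-3.5-turbo-instruct",
        messages=[{"role": "user", "content": "Say hello"}],
        optional_params={"max_tokens": 16, "temperature": 0.0},
        headers={},
    )
    print(request_body)  # {'model': ..., 'prompt': ..., 'max_tokens': 16, ...}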