
"""
This file is used to calculate the cost of the Gemini API.

Handles the context caching for Gemini API.
"""

from typing import TYPE_CHECKING, Tuple

if TYPE_CHECKING:
    from litellm.types.utils import ModelInfo, Usage


def cost_per_token(model: str, usage: "Usage") -> Tuple[float, float]:
    """
    Calculates the cost per token for a given model, prompt tokens, and completion tokens.

    Follows the same logic as Anthropic's cost per token calculation.
    """
    from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token

    return generic_cost_per_token(
        model=model, usage=usage, custom_llm_provider="gemini"
    )


def cost_per_web_search_request(usage: "Usage", model_info: "ModelInfo") -> float:
    """
    Calculates the cost per web search request for a given model, prompt tokens, and completion tokens.
    """
    from litellm.types.utils import PromptTokensDetailsWrapper

    # Flat cost per web search request ($0.035 each)
    cost_per_web_search_request = 35e-3

    # Number of web search requests reported in the usage object's prompt token details
    number_of_web_search_requests = 0
    if (
        usage is not None
        and usage.prompt_tokens_details is not None
        and isinstance(usage.prompt_tokens_details, PromptTokensDetailsWrapper)
        and hasattr(usage.prompt_tokens_details, "web_search_requests")
        and usage.prompt_tokens_details.web_search_requests is not None
    ):
        number_of_web_search_requests = usage.prompt_tokens_details.web_search_requests

    total_cost = cost_per_web_search_request * number_of_web_search_requests
    return total_cost