LiteLLM Minor Fixes & Improvements (10/05/2024) (#6083)

* docs(prompt_caching.md): add prompt caching cost calc example to docs (a rough sketch of that flow follows this list)

* docs(prompt_caching.md): add proxy examples to docs

* feat(utils.py): expose new helper `supports_prompt_caching()` to check if a model supports prompt caching (see the usage sketch after the diff below)

* docs(prompt_caching.md): add docs on checking model support for prompt caching

* build: fix invalid json
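The cost-calc flow that the docs example walks through looks roughly like the sketch below. This is an illustration only, not the example from prompt_caching.md itself: the model name and prompt contents are placeholders, and it assumes `completion_cost()` picks up the cache-related token counts from the response's usage block, which is what the new docs section describes.

```python
import litellm

# Requires ANTHROPIC_API_KEY in the environment; the model name is a placeholder.
response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20240620",
    messages=[
        {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    # A long, reusable system prompt is what makes caching worthwhile.
                    "text": "You are a meticulous legal assistant. " * 200,
                    "cache_control": {"type": "ephemeral"},  # mark this block as cacheable
                }
            ],
        },
        {"role": "user", "content": "Summarize your instructions in one sentence."},
    ],
)

# completion_cost() prices the response from its usage block, which is where the
# cache-related token counts land (see the caveat in the lead-in above).
cost = litellm.completion_cost(completion_response=response)
print(f"response cost: ${cost:.6f}")
```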
Krish Dholakia, 2024-10-05 18:59:11 -04:00 (committed by GitHub)
parent fac3b2ee42
commit f2c0a31e3c
7 changed files with 459 additions and 59 deletions


@@ -2179,6 +2179,40 @@ def supports_function_calling(
        )


def supports_prompt_caching(
    model: str, custom_llm_provider: Optional[str] = None
) -> bool:
    """
    Check if the given model supports prompt caching and return a boolean value.

    Parameters:
        model (str): The model name to be checked.
        custom_llm_provider (Optional[str]): The provider to be checked.

    Returns:
        bool: True if the model supports prompt caching, False otherwise.

    Raises:
        Exception: If the given model is not found or there's an error in retrieval.
    """
    try:
        model, custom_llm_provider, _, _ = litellm.get_llm_provider(
            model=model, custom_llm_provider=custom_llm_provider
        )
        model_info = litellm.get_model_info(
            model=model, custom_llm_provider=custom_llm_provider
        )
        if model_info.get("supports_prompt_caching", False) is True:
            return True
        return False
    except Exception as e:
        raise Exception(
            f"Model not found or error in checking prompt caching support. You passed model={model}, custom_llm_provider={custom_llm_provider}. Error: {str(e)}"
        )


def supports_vision(model: str, custom_llm_provider: Optional[str] = None) -> bool:
    """
    Check if the given model supports vision and return a boolean value.
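
For reference, a quick usage sketch of the new helper. It assumes the function is exported at the package level alongside the existing `supports_function_calling()` helper; the model names are placeholders.

```python
import litellm

# True when the model's entry in the model cost map sets supports_prompt_caching.
if litellm.supports_prompt_caching(model="anthropic/claude-3-5-sonnet-20240620"):
    print("prompt caching supported")

# Per the implementation above, unresolvable models raise instead of returning False.
try:
    litellm.supports_prompt_caching(model="not-a-real-model")
except Exception as e:
    print(f"lookup failed: {e}")
```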