LiteLLM Minor Fixes & Improvements (09/25/2024) (#5893)

* fix(langfuse.py): support new langfuse prompt_chat class init params

* fix(langfuse.py): handle new init values on prompt chat + prompt text templates

Fixes an error raised during Langfuse logging

* docs(openai_compatible.md): clarify that the `openai/` prefix routes requests correctly to the `/v1/completions` route

Fixes https://github.com/BerriAI/litellm/issues/5876
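
For reference, a minimal sketch of the documented behavior (model name, endpoint, and key below are placeholders, not values from this PR): the `openai/` prefix tells litellm to speak the OpenAI spec, and text-completion calls are routed to the server's `/v1/completions` endpoint.

```python
import litellm

# `openai/` prefix = treat the target as an OpenAI-compatible server;
# text_completion() is routed to its /v1/completions route.
response = litellm.text_completion(
    model="openai/my-model",              # placeholder model name
    prompt="Say hello",
    api_base="http://localhost:8000/v1",  # placeholder OpenAI-compatible server
    api_key="sk-placeholder",
)
print(response.choices[0].text)
```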

* fix(utils.py): handle unmapped gemini model optional param translation

Fixes https://github.com/BerriAI/litellm/issues/5888
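
A quick way to inspect the param mapping involved (a sketch; the exact list returned depends on the model): with this fix, Gemini models that lack a model-specific mapping fall back to the provider-level translation instead of erroring.

```python
import litellm

# OpenAI params litellm knows how to translate for a Vertex AI Gemini model.
supported = litellm.get_supported_openai_params(
    model="gemini-1.5-pro", custom_llm_provider="vertex_ai"
)
print(supported)  # e.g. ["temperature", "top_p", "max_tokens", ...]
```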

* fix(o1_transformation.py): fix o1 validation so it does not raise an error when temperature=1

Fixes https://github.com/BerriAI/litellm/issues/5884
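
A minimal repro of the fixed behavior (the model alias is an assumption, not taken from this PR): o1 models only accept the default temperature of 1, and this call used to fail validation.

```python
import litellm

# Passing exactly temperature=1 now succeeds; other values are still rejected,
# since that is the only temperature the o1 API accepts.
response = litellm.completion(
    model="o1-preview",
    messages=[{"role": "user", "content": "Hello"}],
    temperature=1,
)
```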

* fix(prisma_client.py): refresh the IAM token

Fixes https://github.com/BerriAI/litellm/issues/5896
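
This is not litellm's internal prisma_client.py code, only a sketch of the general technique: RDS IAM auth tokens expire after 15 minutes, so a long-lived database client must mint a fresh one periodically. Host, port, user, and region below are placeholders.

```python
import boto3

# Placeholder connection details; NOT litellm's actual implementation.
client = boto3.client("rds", region_name="us-west-2")
token = client.generate_db_auth_token(
    DBHostname="mydb.example.us-west-2.rds.amazonaws.com",
    Port=5432,
    DBUsername="db_user",
)
# Use `token` as the DB password; re-generate it before the 15-minute expiry.
```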

* fix: pass drop_params where required

* fix(utils.py): pass drop_params correctly
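
For context, `drop_params` is litellm's switch for silently dropping OpenAI params a provider does not support instead of raising. A minimal sketch of how a caller exercises the code paths patched in the diff below (model name and unsupported param chosen for illustration):

```python
import litellm

litellm.drop_params = True  # global default

response = litellm.completion(
    model="vertex_ai/gemini-1.5-pro",
    messages=[{"role": "user", "content": "Hi"}],
    logit_bias={"1234": 10},  # a param Gemini doesn't support; dropped instead of raising
    drop_params=True,         # per-call override, forwarded via get_optional_params
)
```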

* fix(types/vertex_ai.py): fix generation config

* test(test_max_completion_tokens.py): fix test

* fix(vertex_and_google_ai_studio_gemini.py): fix `map_openai_params`
Krish Dholakia, 2024-09-26 16:41:44 -07:00 (committed by GitHub)
commit a1d9e96b31 (parent 16c0307eab)
22 changed files with 755 additions and 292 deletions

litellm/utils.py

@@ -3239,8 +3239,15 @@ def get_optional_params(
             non_default_params=non_default_params,
             optional_params=optional_params,
             model=model,
+            drop_params=(
+                drop_params
+                if drop_params is not None and isinstance(drop_params, bool)
+                else False
+            ),
         )
-    elif custom_llm_provider == "vertex_ai_beta":
+    elif custom_llm_provider == "vertex_ai_beta" or (
+        custom_llm_provider == "vertex_ai" and "gemini" in model
+    ):
         supported_params = get_supported_openai_params(
             model=model, custom_llm_provider=custom_llm_provider
         )
@@ -3277,6 +3284,11 @@ def get_optional_params(
             non_default_params=non_default_params,
             optional_params=optional_params,
             model=model,
+            drop_params=(
+                drop_params
+                if drop_params is not None and isinstance(drop_params, bool)
+                else False
+            ),
         )
     elif custom_llm_provider == "vertex_ai" and model in litellm.vertex_mistral_models:
         supported_params = get_supported_openai_params(
@@ -3301,6 +3313,11 @@ def get_optional_params(
             non_default_params=non_default_params,
             optional_params=optional_params,
             model=model,
+            drop_params=(
+                drop_params
+                if drop_params is not None and isinstance(drop_params, bool)
+                else False
+            ),
         )
     elif custom_llm_provider == "sagemaker":
         ## check if unsupported param passed in
@@ -3710,6 +3727,7 @@ def get_optional_params(
             non_default_params=non_default_params,
             optional_params=optional_params,
             model=model,
+            drop_params=drop_params,
         )
     elif custom_llm_provider == "openrouter":
         supported_params = get_supported_openai_params(
@@ -3818,6 +3836,7 @@ def get_optional_params(
             non_default_params=non_default_params,
             optional_params=optional_params,
             model=model,
+            drop_params=drop_params,
         )
     elif custom_llm_provider == "azure":
         supported_params = get_supported_openai_params(
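
The repeated `drop_params=(...)` expression in the hunks above normalizes a possibly-`None` value to a strict `bool` before it reaches the provider config. A standalone sketch of that pattern (the helper name here is hypothetical, not part of litellm):

```python
from typing import Optional


def coerce_drop_params(drop_params: Optional[bool]) -> bool:
    # Mirrors the inline expression in the diff: a real bool passes through;
    # None (or any non-bool value) falls back to False.
    return (
        drop_params
        if drop_params is not None and isinstance(drop_params, bool)
        else False
    )


assert coerce_drop_params(True) is True
assert coerce_drop_params(None) is False
```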