Litellm dev 12 07 2024 (#7086)

* fix(main.py): support passing max retries to azure/openai embedding integrations

Fixes https://github.com/BerriAI/litellm/issues/7003
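
For illustration, a minimal sketch of what this enables, assuming the standard `litellm.embedding` call shape (the `azure/` deployment name is a placeholder):

```python
import litellm

# max_retries is now forwarded to the underlying Azure/OpenAI client
# instead of being dropped (see issue #7003).
response = litellm.embedding(
    model="azure/text-embedding-ada-002",  # placeholder deployment name
    input=["hello world"],
    max_retries=3,
)
```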

* feat(team_endpoints.py): allow updating team model aliases

Closes https://github.com/BerriAI/litellm/issues/6956
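
A hedged sketch of how a proxy admin might use this, assuming the existing `/team/update` endpoint now accepts a `model_aliases` field (endpoint URL, key, and payload values are illustrative):

```python
import requests

# Hypothetical example: update a team's model aliases via the LiteLLM proxy.
resp = requests.post(
    "http://localhost:4000/team/update",
    headers={"Authorization": "Bearer sk-1234"},  # placeholder admin key
    json={
        "team_id": "my-team-id",
        "model_aliases": {"gpt-4o-mini-alias": "gpt-4o-mini"},
    },
)
print(resp.json())
```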

* feat(router.py): allow specifying model id as fallback - skips any cooldown check

Allows a default model to be used when all models in the group are in cooldown

s/o @micahjsmith
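
A hedged Router configuration sketch of what this allows, assuming a deployment is given an explicit `model_info.id` and that id is then referenced in `fallbacks` (all names are illustrative):

```python
from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "gpt-4o",
            "litellm_params": {"model": "openai/gpt-4o"},
        },
        {
            # A dedicated "default" deployment, given an explicit id.
            "model_name": "gpt-4o-fallback",
            "litellm_params": {"model": "openai/gpt-4o-mini"},
            "model_info": {"id": "my-default-deployment"},
        },
    ],
    # Referencing the model id directly as a fallback skips the cooldown
    # check, so this deployment is still tried when the whole group is
    # cooling down.
    fallbacks=[{"gpt-4o": ["my-default-deployment"]}],
)
```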

* docs(reliability.md): add fallback to specific model to docs

* fix(utils.py): new 'is_prompt_caching_valid_prompt' helper util

Allows the user to check whether a messages/tools combination qualifies for prompt caching

Related issue: https://github.com/BerriAI/litellm/issues/6784
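
A minimal usage sketch of the new helper, with the argument names taken from the accompanying diff (the Anthropic-style `cache_control` message is illustrative):

```python
from litellm.utils import is_prompt_caching_valid_prompt

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "<a long, re-used prefix>",
                "cache_control": {"type": "ephemeral"},
            }
        ],
    }
]

# Returns True only when the messages/tools meet the provider's
# prompt-caching requirements (e.g. a minimum cacheable prompt size).
valid = is_prompt_caching_valid_prompt(
    messages=messages,
    tools=None,
    model="anthropic/claude-3-5-sonnet-20240620",
    custom_llm_provider=None,
)
```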

* feat(router.py): store model id for prompt caching valid prompt

Allows routing to that model id on subsequent requests

* fix(router.py): only cache if the prompt qualifies for prompt caching

Prevents storing unnecessary items in the cache

* feat(router.py): support routing prompt caching enabled models to previous deployments

Closes https://github.com/BerriAI/litellm/issues/6784
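
Taken together with the two items above, the flow (as sketched in the router diff below) is roughly: on a successful call with a prompt-caching-valid prompt, the deployment's model id is written to a `PromptCachingCache`; on the next request with the same messages, the router looks that id up first and pins the request to the same deployment. A hedged end-user sketch, assuming the standard async Router API (deployments and keys are placeholders):

```python
import asyncio
from litellm import Router

# Illustrative list: two deployments of the same prompt-caching-capable model.
router = Router(
    model_list=[
        {
            "model_name": "claude-3-5-sonnet",
            "litellm_params": {"model": "anthropic/claude-3-5-sonnet-20240620", "api_key": "sk-ant-key-1"},
        },
        {
            "model_name": "claude-3-5-sonnet",
            "litellm_params": {"model": "anthropic/claude-3-5-sonnet-20240620", "api_key": "sk-ant-key-2"},
        },
    ]
)

messages = [
    # A long, re-used prefix marked for prompt caching (same shape as the helper example above).
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "<long shared prefix>", "cache_control": {"type": "ephemeral"}},
        ],
    },
]

async def main() -> None:
    # First call: routed normally; because the prompt qualifies for prompt
    # caching, the winning deployment's model id is stored in the cache.
    await router.acompletion(model="claude-3-5-sonnet", messages=messages)
    # Second call with the same prefix: the router checks the prompt-caching
    # cache first and reuses the same deployment.
    await router.acompletion(model="claude-3-5-sonnet", messages=messages)

asyncio.run(main())
```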

* test: fix linting errors

* feat(databricks/): convert BaseModel to dict and exclude None values

Allows passing Pydantic messages to Databricks
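
A minimal sketch of the conversion this refers to, using the standard Pydantic v2 API (the message class here is only a stand-in for whatever Pydantic message type the caller passes):

```python
from typing import Optional
from pydantic import BaseModel

class ChatMessage(BaseModel):  # stand-in for a Pydantic chat message
    role: str
    content: str
    name: Optional[str] = None

msg = ChatMessage(role="user", content="hello")

# Convert to a plain dict and drop None-valued fields, so optional fields
# the caller never set are not sent to the provider as nulls.
payload_message = msg.model_dump(exclude_none=True)
# -> {"role": "user", "content": "hello"}
```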

* fix(utils.py): ensure all chat completion messages are dicts

* (feat) Track `custom_llm_provider` in LiteLLMSpendLogs (#7081)

* add custom_llm_provider to SpendLogsPayload

* add custom_llm_provider to SpendLogs

* add custom llm provider to SpendLogs payload

* test_spend_logs_payload
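
For context, a hedged sketch of the field being added; the surrounding fields are illustrative, not the full SpendLogsPayload definition:

```python
from typing import Optional, TypedDict

class SpendLogsPayload(TypedDict, total=False):
    # ... existing fields such as request_id, model, spend, etc. ...
    model: str
    spend: float
    # New: the provider that actually served the request (e.g. "azure",
    # "openai", "bedrock"), so spend can be broken down per provider.
    custom_llm_provider: Optional[str]
```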

* Add MLflow to the side bar (#7031)

Signed-off-by: B-Step62 <yuki.watanabe@databricks.com>

* (bug fix) SpendLogs DB update: catch all possible DB errors for retrying (#7082)

* catch DB_CONNECTION_ERROR_TYPES

* fix DB retry mechanism for SpendLog updates

* use DB_CONNECTION_ERROR_TYPES in auth checks

* fix exponential backoff for writing SpendLogs

* use _raise_failed_update_spend_exception to ensure errors are logged as non-blocking

* test_update_spend_logs_multiple_batches_with_failure
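
A generic sketch of the retry pattern described here (not the exact proxy code): catch the broadened set of DB connection errors, back off exponentially, and surface the final failure as a non-blocking error.

```python
import asyncio

# Stand-ins for the proxy's actual error tuple and logger.
DB_CONNECTION_ERROR_TYPES = (ConnectionError, TimeoutError)

async def update_spend_logs_with_retry(write_batch, n_retries: int = 3) -> None:
    for attempt in range(n_retries):
        try:
            await write_batch()
            return
        except DB_CONNECTION_ERROR_TYPES as e:
            if attempt == n_retries - 1:
                # Final failure: log as non-blocking instead of raising,
                # so spend tracking never takes down the request path.
                print(f"Non-blocking: failed to update SpendLogs after retries: {e}")
                return
            # Exponential backoff before the next attempt: 1s, 2s, 4s, ...
            await asyncio.sleep(2**attempt)
```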

* (Feat) Add StructuredOutputs support for Fireworks.AI (#7085)

* fix model cost map for Fireworks AI: set "supports_response_schema": true

* fix supports_response_schema

* fix OpenAI param mapping for Fireworks AI

* test_map_response_format

* test_map_response_format
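
A hedged usage sketch: with `supports_response_schema` enabled for Fireworks AI, an OpenAI-style `response_format` with a JSON schema can be passed through `litellm.completion` and mapped to the provider's params (model name and schema are illustrative):

```python
import litellm

response = litellm.completion(
    model="fireworks_ai/accounts/fireworks/models/llama-v3p1-70b-instruct",
    messages=[{"role": "user", "content": "Give me a user profile as JSON."}],
    # OpenAI-style structured output request, mapped to Fireworks AI params.
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "user_profile",
            "schema": {
                "type": "object",
                "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
                "required": ["name", "age"],
            },
        },
    },
)
print(response.choices[0].message.content)
```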

* added deepinfra/Meta-Llama-3.1-405B-Instruct (#7084)

* bump: version 1.53.9 → 1.54.0

* fix deepinfra

* litellm db fixes LiteLLM_UserTable (#7089)

* ci/cd queue new release

* fix llama-3.3-70b-versatile

* refactor - use consistent file naming convention `AI21/` -> `ai21`  (#7090)

* fix refactor - use consistent file naming convention

* ci/cd run again

* fix naming structure

* fix use consistent naming (#7092)

---------

Signed-off-by: B-Step62 <yuki.watanabe@databricks.com>
Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Yuki Watanabe <31463517+B-Step62@users.noreply.github.com>
Co-authored-by: ali sayyah <ali.sayyah2@gmail.com>
Commit 70c4e1b4d2 (parent 664d82ca9e) by Krish Dholakia, 2024-12-08 00:30:33 -08:00, committed by GitHub
24 changed files with 840 additions and 193 deletions

litellm/router.py

@@ -36,6 +36,7 @@ from typing import (
Tuple,
TypedDict,
Union,
cast,
)
import httpx
@@ -96,6 +97,7 @@ from litellm.router_utils.router_callbacks.track_deployment_metrics import (
)
from litellm.scheduler import FlowItem, Scheduler
from litellm.types.llms.openai import (
AllMessageValues,
Assistant,
AssistantToolParam,
AsyncCursorPage,
@@ -149,10 +151,12 @@ from litellm.utils import (
get_llm_provider,
get_secret,
get_utc_datetime,
is_prompt_caching_valid_prompt,
is_region_allowed,
)
from .router_utils.pattern_match_deployments import PatternMatchRouter
from .router_utils.prompt_caching_cache import PromptCachingCache
if TYPE_CHECKING:
from opentelemetry.trace import Span as _Span
@@ -737,7 +741,9 @@ class Router:
model_client = potential_model_client
### DEPLOYMENT-SPECIFIC PRE-CALL CHECKS ### (e.g. update rpm pre-call. Raise error, if deployment over limit)
self.routing_strategy_pre_call_checks(deployment=deployment)
## only run if model group given, not model id
if model not in self.get_model_ids():
self.routing_strategy_pre_call_checks(deployment=deployment)
response = litellm.completion(
**{
@@ -2787,8 +2793,10 @@ class Router:
*args,
**input_kwargs,
)
return response
except Exception as new_exception:
traceback.print_exc()
parent_otel_span = _get_parent_otel_span_from_kwargs(kwargs)
verbose_router_logger.error(
"litellm.router.py::async_function_with_fallbacks() - Error occurred while trying to do fallbacks - {}\n{}\n\nDebug Information:\nCooldown Deployments={}".format(
@@ -3376,6 +3384,29 @@ class Router:
deployment_id=id,
)
## PROMPT CACHING
prompt_cache = PromptCachingCache(
cache=self.cache,
)
if (
standard_logging_object["messages"] is not None
and isinstance(standard_logging_object["messages"], list)
and deployment_name is not None
and isinstance(deployment_name, str)
):
valid_prompt = is_prompt_caching_valid_prompt(
messages=standard_logging_object["messages"], # type: ignore
tools=None,
model=deployment_name,
custom_llm_provider=None,
)
if valid_prompt:
await prompt_cache.async_add_model_id(
model_id=id,
messages=standard_logging_object["messages"], # type: ignore
tools=None,
)
return tpm_key
except Exception as e:
@@ -5190,7 +5221,6 @@ class Router:
- List, if multiple models chosen
- Dict, if specific model chosen
"""
# check if aliases set on litellm model alias map
if specific_deployment is True:
return model, self._get_deployment_by_litellm_model(model=model)
@@ -5302,13 +5332,6 @@ class Router:
cooldown_deployments=cooldown_deployments,
)
# filter pre-call checks
_allowed_model_region = (
request_kwargs.get("allowed_model_region")
if request_kwargs is not None
else None
)
if self.enable_pre_call_checks and messages is not None:
healthy_deployments = self._pre_call_checks(
model=model,
@@ -5317,6 +5340,24 @@ class Router:
request_kwargs=request_kwargs,
)
if messages is not None and is_prompt_caching_valid_prompt(
messages=cast(List[AllMessageValues], messages),
model=model,
custom_llm_provider=None,
):
prompt_cache = PromptCachingCache(
cache=self.cache,
)
healthy_deployment = (
await prompt_cache.async_get_prompt_caching_deployment(
router=self,
messages=cast(List[AllMessageValues], messages),
tools=None,
)
)
if healthy_deployment is not None:
return healthy_deployment
# check if user wants to do tag based routing
healthy_deployments = await get_deployments_for_tag( # type: ignore
llm_router_instance=self,