Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 03:04:13 +00:00
fix(proxy/utils.py): add stronger typing for litellm params in failure call logging
commit 1a83935aa4 (parent 34575293a5)
3 changed files with 30 additions and 4 deletions
@@ -25,7 +25,7 @@ from typing_extensions import overload
 import litellm
 import litellm.litellm_core_utils
 import litellm.litellm_core_utils.litellm_logging
-from litellm import EmbeddingResponse, ImageResponse, ModelResponse
+from litellm import EmbeddingResponse, ImageResponse, ModelResponse, get_litellm_params
 from litellm._logging import verbose_proxy_logger
 from litellm._service_logger import ServiceLogging, ServiceTypes
 from litellm.caching import DualCache, RedisCache
@@ -50,7 +50,7 @@ from litellm.proxy.hooks.max_budget_limiter import _PROXY_MaxBudgetLimiter
 from litellm.proxy.hooks.parallel_request_limiter import (
     _PROXY_MaxParallelRequestsHandler,
 )
-from litellm.types.utils import CallTypes
+from litellm.types.utils import CallTypes, LoggedLiteLLMParams

 if TYPE_CHECKING:
     from opentelemetry.trace import Span as _Span
@@ -602,14 +602,20 @@ class ProxyLogging:
         if litellm_logging_obj is not None:
             ## UPDATE LOGGING INPUT
             _optional_params = {}
+            _litellm_params = {}
+
+            litellm_param_keys = LoggedLiteLLMParams.__annotations__.keys()
             for k, v in request_data.items():
-                if k != "model" and k != "user" and k != "litellm_params":
+                if k in litellm_param_keys:
+                    _litellm_params[k] = v
+                elif k != "model" and k != "user":
                     _optional_params[k] = v
+
             litellm_logging_obj.update_environment_variables(
                 model=request_data.get("model", ""),
                 user=request_data.get("user", ""),
                 optional_params=_optional_params,
-                litellm_params=request_data.get("litellm_params", {}),
+                litellm_params=_litellm_params,
             )

             input: Union[list, str, dict] = ""
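The core of this hunk is the key split: any request field whose name appears on the LoggedLiteLLMParams TypedDict is routed into _litellm_params, and everything else (apart from model and user) stays in _optional_params. Below is a minimal, self-contained sketch of that pattern, not the actual proxy code; the LoggedLiteLLMParams here is a trimmed stand-in for the real class this commit adds to litellm.types.utils, and the request values are illustrative.

# Sketch of the key-split used in ProxyLogging above (stand-in types, sample data).
from typing import Optional, TypedDict


class LoggedLiteLLMParams(TypedDict, total=False):
    api_base: Optional[str]
    metadata: Optional[dict]
    litellm_call_id: Optional[str]


request_data = {
    "model": "gpt-3.5-turbo",
    "user": "user-123",
    "temperature": 0.2,
    "metadata": {"team": "eng"},
    "api_base": "https://example.internal",  # hypothetical value
}

# The TypedDict's declared field names decide which request keys count as
# litellm params; everything else (except model/user) is an optional param.
litellm_param_keys = LoggedLiteLLMParams.__annotations__.keys()

_litellm_params = {}
_optional_params = {}
for k, v in request_data.items():
    if k in litellm_param_keys:
        _litellm_params[k] = v
    elif k != "model" and k != "user":
        _optional_params[k] = v

print(_litellm_params)   # {'metadata': {'team': 'eng'}, 'api_base': 'https://example.internal'}
print(_optional_params)  # {'temperature': 0.2}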
@@ -234,6 +234,7 @@ class CompletionCustomHandler(
            )
            assert isinstance(kwargs["optional_params"], dict)
            assert isinstance(kwargs["litellm_params"], dict)
+            assert isinstance(kwargs["litellm_params"]["metadata"], Optional[dict])
            assert isinstance(kwargs["start_time"], (datetime, type(None)))
            assert isinstance(kwargs["stream"], bool)
            assert isinstance(kwargs["user"], (str, type(None)))
@@ -1029,3 +1029,22 @@ class GenericImageParsingChunk(TypedDict):
 class ResponseFormatChunk(TypedDict, total=False):
     type: Required[Literal["json_object", "text"]]
     response_schema: dict
+
+
+class LoggedLiteLLMParams(TypedDict, total=False):
+    force_timeout: Optional[float]
+    custom_llm_provider: Optional[str]
+    api_base: Optional[str]
+    litellm_call_id: Optional[str]
+    model_alias_map: Optional[dict]
+    metadata: Optional[dict]
+    model_info: Optional[dict]
+    proxy_server_request: Optional[dict]
+    acompletion: Optional[bool]
+    preset_cache_key: Optional[str]
+    no_log: Optional[bool]
+    input_cost_per_second: Optional[float]
+    input_cost_per_token: Optional[float]
+    output_cost_per_token: Optional[float]
+    output_cost_per_second: Optional[float]
+    cooldown_time: Optional[float]
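Because LoggedLiteLLMParams is declared with total=False and every field is Optional, a conforming payload may carry any subset of these keys, which is what lets the failure-logging path pass through only the fields it actually received. A small hedged example of a type-checked params dict (values are illustrative, not taken from the commit, and the class is abbreviated here):

from typing import Optional, TypedDict


class LoggedLiteLLMParams(TypedDict, total=False):
    # Abbreviated copy of the fields added above; the full definition lives
    # in litellm.types.utils after this commit.
    custom_llm_provider: Optional[str]
    api_base: Optional[str]
    metadata: Optional[dict]
    no_log: Optional[bool]


# total=False means every key may be omitted, so partial dicts still satisfy
# the type; the values themselves may also be None.
params: LoggedLiteLLMParams = {
    "custom_llm_provider": "openai",              # illustrative value
    "metadata": {"user_api_key_hash": "abc123"},  # hypothetical metadata
}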