LiteLLM Minor Fixes & Improvements (12/05/2024) (#7051)

* fix(cost_calculator.py): move to using `.get_model_info()` for cost per token calculations

Ensures cost tracking is reliable by handling edge cases when parsing the model cost map.
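As a rough illustration (not the actual cost_calculator.py logic), a cost lookup via litellm.get_model_info() looks like the sketch below; the model name is an example.

    import litellm

    def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
        # get_model_info() raises for unmapped models instead of silently
        # returning zero-cost entries - the edge case being guarded here
        info = litellm.get_model_info(model=model)
        prompt_cost = prompt_tokens * (info.get("input_cost_per_token") or 0.0)
        completion_cost = completion_tokens * (info.get("output_cost_per_token") or 0.0)
        # note: some models are priced per second instead (see the
        # cost-per-second fixes later in this commit); this sketch ignores them
        return prompt_cost + completion_cost

    print(estimate_cost("gpt-4o-mini", 1000, 200))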

* build(model_prices_and_context_window.json): add 'supports_response_schema' for select Together AI (tgai) models

Fixes https://github.com/BerriAI/litellm/pull/7037#discussion_r1872157329
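The flag can be read back the same way (illustrative model name):

    import litellm

    info = litellm.get_model_info(
        model="together_ai/meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
    )
    print(info.get("supports_response_schema"))  # True once the map entry has the flag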

* build(model_prices_and_context_window.json): remove 'pdf input' and 'vision' support from nova micro in model map

Bedrock docs indicate no support for micro - https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference-supported-models-features.html

* fix(converse_transformation.py): support amazon nova tool use
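A hedged sketch of what this enables (model id and tool definition are illustrative; assumes AWS credentials are configured):

    import litellm

    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ]

    response = litellm.completion(
        model="bedrock/us.amazon.nova-pro-v1:0",
        messages=[{"role": "user", "content": "What's the weather in Boston?"}],
        tools=tools,
    )
    print(response.choices[0].message.tool_calls)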

* fix(opentelemetry): Add missing LLM request type attribute to spans (#7041)

* feat(opentelemetry): add LLM request type attribute to spans

* lint
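The request type attribute from the OpenTelemetry change above, sketched with the OTel SDK (the exact attribute key LiteLLM emits may differ from the llm.request.type convention shown here):

    from opentelemetry import trace

    tracer = trace.get_tracer("litellm")
    with tracer.start_as_current_span("litellm_request") as span:
        span.set_attribute("llm.request.type", "chat")  # e.g. chat | completion | embedding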

* fix: curl usage (#7038)

curl -d, --data <data> is a lowercase d;
curl -D, --dump-header <filename> is an uppercase D.

references:
https://curl.se/docs/manpage.html#-d
https://curl.se/docs/manpage.html#-D

* fix(spend_tracking.py): handle empty 'id' in model response - when creating spend log

Fixes https://github.com/BerriAI/litellm/issues/7023

* fix(streaming_chunk_builder.py): handle initial id being empty string

Fixes https://github.com/BerriAI/litellm/issues/7023

* fix(anthropic_passthrough_logging_handler.py): add end user cost tracking for anthropic pass through endpoint

* docs(pass_through/): refactor docs location + add table on supported features for pass through endpoints

* feat(anthropic_passthrough_logging_handler.py): support end user cost tracking via anthropic sdk

* docs(anthropic_completion.md): add docs on passing end user param for cost tracking on anthropic sdk
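A sketch of the documented flow (proxy URL, key, and model are placeholders); end-user spend is attributed via Anthropic's metadata.user_id field:

    import anthropic

    client = anthropic.Anthropic(
        base_url="http://0.0.0.0:4000/anthropic",  # LiteLLM proxy pass-through
        api_key="sk-1234",  # LiteLLM virtual key
    )

    message = client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=256,
        messages=[{"role": "user", "content": "Hello"}],
        metadata={"user_id": "end-user-123"},  # tracked as the end user for spend
    )
    print(message.content)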

* fix(litellm_logging.py): use standard logging payload if present in kwargs

prevent datadog logging error for pass through endpoints

* docs(bedrock.md): add rerank api usage example to docs
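The usage is along these lines (hedged: the exact supported model ids come from the bedrock.md docs):

    import litellm

    response = litellm.rerank(
        model="bedrock/amazon.rerank-v1:0",
        query="What is the capital of France?",
        documents=["Paris is the capital of France.", "London is in England."],
        top_n=1,
    )
    print(response)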

* bugfix/change dummy tool name format (#7053)

* fix viewing keys (#7042)

* ui new build

* build(model_prices_and_context_window.json): add bedrock region models to model cost map (#7044)

* bye (#6982)

* (fix) litellm router.aspeech (#6962)

* docs: Migrating Databases

* fix aspeech on router

* test_audio_speech_router

* test_audio_speech_router

* docs: show supported providers on Batches API doc

* change dummy tool name format

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Krish Dholakia <krrishdholakia@gmail.com>
Co-authored-by: yujonglee <yujonglee.dev@gmail.com>

* fix: fix linting errors

* test: update test

* fix(litellm_logging.py): fix pass through check

* fix(test_otel_logging.py): fix test

* fix(cost_calculator.py): update handling for cost per second

* fix(cost_calculator.py): fix cost check

* test: fix test

* (fix) adding public routes when using custom header (#7045)

* get_api_key_from_custom_header

* add test_get_api_key_from_custom_header

* fix testing: use one file for user API key auth tests

* fix test user api key auth

* test_custom_api_key_header_name

* build: update ui build
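A minimal sketch of what get_api_key_from_custom_header (from the custom-header fix above) does conceptually; the route set and body are illustrative, not LiteLLM's exact implementation:

    from fastapi import Request

    PUBLIC_ROUTES = {"/health", "/routes"}  # illustrative; LiteLLM keeps its own list

    def get_api_key_from_custom_header(request: Request, header_name: str) -> str:
        # public routes must stay reachable even when a custom auth header is set
        if request.url.path in PUBLIC_ROUTES:
            return ""
        # Starlette/FastAPI header lookups are case-insensitive
        return request.headers.get(header_name, "")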

---------

Co-authored-by: Doron Kopit <83537683+doronkopit5@users.noreply.github.com>
Co-authored-by: lloydchang <lloydchang@gmail.com>
Co-authored-by: hgulersen <haymigulersen@gmail.com>
Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: yujonglee <yujonglee.dev@gmail.com>
Commit 92a7e8e3e9 (parent 56956fd6e7), committed by GitHub
Author: Krish Dholakia, 2024-12-06 14:29:53 -08:00
108 changed files with 561 additions and 356 deletions

litellm_logging.py

@@ -777,7 +777,12 @@ class Logging:
         return None

     def _success_handler_helper_fn(
-        self, result=None, start_time=None, end_time=None, cache_hit=None
+        self,
+        result=None,
+        start_time=None,
+        end_time=None,
+        cache_hit=None,
+        standard_logging_object: Optional[StandardLoggingPayload] = None,
     ):
         try:
             if start_time is None:
@@ -795,7 +800,9 @@ class Logging:
             ## if model in model cost map - log the response cost
             ## else set cost to None
             if (
-                result is not None and self.stream is not True
+                standard_logging_object is None
+                and result is not None
+                and self.stream is not True
             ):  # handle streaming separately
                 if (
                     isinstance(result, ModelResponse)
@@ -826,9 +833,11 @@ class Logging:
                                 "metadata"
                             ] = {}
-                        self.model_call_details["litellm_params"]["metadata"][
+                        self.model_call_details["litellm_params"]["metadata"][  # type: ignore
                             "hidden_params"
-                        ] = getattr(result, "_hidden_params", {})
+                        ] = getattr(
+                            result, "_hidden_params", {}
+                        )

                     ## STANDARDIZED LOGGING PAYLOAD
                     self.model_call_details["standard_logging_object"] = (
@@ -853,6 +862,10 @@ class Logging:
                         status="success",
                     )
                 )
+            elif standard_logging_object is not None:
+                self.model_call_details["standard_logging_object"] = (
+                    standard_logging_object
+                )
             else:  # streaming chunks + image gen.
                 self.model_call_details["response_cost"] = None
@@ -885,6 +898,7 @@ class Logging:
             end_time=end_time,
             result=result,
             cache_hit=cache_hit,
+            standard_logging_object=kwargs.get("standard_logging_object", None),
         )
         # print(f"original response in success handler: {self.model_call_details['original_response']}")
         try:
@@ -896,7 +910,10 @@ class Logging:
             ] = None
             if "complete_streaming_response" in self.model_call_details:
                 return  # break out of this.
-            if self.stream:
+            if self.stream and (
+                isinstance(result, litellm.ModelResponse)
+                or isinstance(result, TextCompletionResponse)
+            ):
                 complete_streaming_response: Optional[
                     Union[ModelResponse, TextCompletionResponse]
                 ] = _assemble_complete_response_from_streaming_chunks(
@@ -1315,6 +1332,8 @@ class Logging:
                         "atranscription", False
                     )
                     is not True
+                    and self.call_type
+                    != CallTypes.pass_through.value  # pass-through endpoints call async_log_success_event
                 ):  # custom logger class
                     if self.stream and complete_streaming_response is None:
                         callback.log_stream_event(
@@ -1399,7 +1418,11 @@ class Logging:
             "Logging Details LiteLLM-Async Success Call, cache_hit={}".format(cache_hit)
         )
         start_time, end_time, result = self._success_handler_helper_fn(
-            start_time=start_time, end_time=end_time, result=result, cache_hit=cache_hit
+            start_time=start_time,
+            end_time=end_time,
+            result=result,
+            cache_hit=cache_hit,
+            standard_logging_object=kwargs.get("standard_logging_object", None),
         )
         ## BUILD COMPLETE STREAMED RESPONSE
         if "async_complete_streaming_response" in self.model_call_details:
@@ -1407,7 +1430,10 @@ class Logging:
             complete_streaming_response: Optional[
                 Union[ModelResponse, TextCompletionResponse]
             ] = None
-            if self.stream is True:
+            if self.stream is True and (
+                isinstance(result, litellm.ModelResponse)
+                or isinstance(result, TextCompletionResponse)
+            ):
                 complete_streaming_response: Optional[
                     Union[ModelResponse, TextCompletionResponse]
                 ] = _assemble_complete_response_from_streaming_chunks(

streaming_chunk_builder.py

@@ -47,9 +47,20 @@ class ChunkProcessor:
             model_response._hidden_params = chunk.get("_hidden_params", {})
         return model_response

+    @staticmethod
+    def _get_chunk_id(chunks: List[Dict[str, Any]]) -> str:
+        """
+        Chunks:
+        [{"id": ""}, {"id": "1"}, {"id": "1"}]
+        """
+        for chunk in chunks:
+            if chunk.get("id"):
+                return chunk["id"]
+        return ""
+
     def build_base_response(self, chunks: List[Dict[str, Any]]) -> ModelResponse:
         chunk = self.first_chunk
-        id = chunk["id"]
+        id = ChunkProcessor._get_chunk_id(chunks)
         object = chunk["object"]
         created = chunk["created"]
         model = chunk["model"]
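Standalone illustration of the first-non-empty-id rule above:

    chunks = [{"id": ""}, {"id": "chatcmpl-123"}, {"id": "chatcmpl-123"}]
    chunk_id = next((c["id"] for c in chunks if c.get("id")), "")
    assert chunk_id == "chatcmpl-123"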