From f9ab7e7b1425fa5cb2b7147eb652743b978824ac Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 13 Feb 2024 22:59:07 -0800
Subject: [PATCH] fix: fix bugs

---
 litellm/integrations/langfuse.py |  4 ++--
 litellm/proxy/proxy_server.py    | 33 +++++++++-----------------------
 2 files changed, 11 insertions(+), 26 deletions(-)

diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index 5a57d1359..b548f22bc 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -231,7 +231,7 @@ class LangFuseLogger:
         print_verbose(f"Langfuse Layer Logging - logging to langfuse v2 ")
 
         if supports_tags:
-            metadata_tags = metadata["tags"]
+            metadata_tags = metadata.get("tags", [])
             tags = metadata_tags
 
         generation_name = metadata.get("generation_name", None)
@@ -278,7 +278,7 @@ class LangFuseLogger:
             "prompt_tokens": response_obj["usage"]["prompt_tokens"],
             "completion_tokens": response_obj["usage"]["completion_tokens"],
             "total_cost": cost if supports_costs else None,
-            }
+        }
         generation_params = {
             "name": generation_name,
             "id": metadata.get("generation_id", generation_id),
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 4927f3db4..43a80bf2f 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -166,9 +166,9 @@ class ProxyException(Exception):
 async def openai_exception_handler(request: Request, exc: ProxyException):
     # NOTE: DO NOT MODIFY THIS, its crucial to map to Openai exceptions
     return JSONResponse(
-        status_code=int(exc.code)
-        if exc.code
-        else status.HTTP_500_INTERNAL_SERVER_ERROR,
+        status_code=(
+            int(exc.code) if exc.code else status.HTTP_500_INTERNAL_SERVER_ERROR
+        ),
         content={
             "error": {
                 "message": exc.message,
@@ -1885,24 +1885,7 @@ async def async_data_generator(response, user_api_key_dict):
 
 
 def select_data_generator(response, user_api_key_dict):
-    try:
-        # since boto3 - sagemaker does not support async calls, we should use a sync data_generator
-        if hasattr(
-            response, "custom_llm_provider"
-        ) and response.custom_llm_provider in ["sagemaker"]:
-            return data_generator(
-                response=response,
-            )
-        else:
-            # default to async_data_generator
-            return async_data_generator(
-                response=response, user_api_key_dict=user_api_key_dict
-            )
-    except:
-        # worst case - use async_data_generator
-        return async_data_generator(
-            response=response, user_api_key_dict=user_api_key_dict
-        )
+    return async_data_generator(response=response, user_api_key_dict=user_api_key_dict)
 
 
 def get_litellm_model_info(model: dict = {}):
@@ -4483,9 +4466,11 @@ async def get_routes():
             "path": getattr(route, "path", None),
             "methods": getattr(route, "methods", None),
             "name": getattr(route, "name", None),
-            "endpoint": getattr(route, "endpoint", None).__name__
-            if getattr(route, "endpoint", None)
-            else None,
+            "endpoint": (
+                getattr(route, "endpoint", None).__name__
+                if getattr(route, "endpoint", None)
+                else None
+            ),
         }
         routes.append(route_info)
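
Note on the langfuse.py change: metadata["tags"] raises a KeyError whenever a caller never attached tags to the request metadata, which crashed the Langfuse logging callback; metadata.get("tags", []) falls back to an empty list instead. A minimal sketch of the difference (the metadata dict below is hypothetical, not taken from the patch):

    # Hypothetical logging metadata from a caller that set no tags.
    metadata = {"generation_name": "chat-completion"}

    # Old behavior: a plain lookup raises KeyError and aborts logging.
    try:
        tags = metadata["tags"]
    except KeyError:
        tags = None  # the Langfuse callback used to fail here

    # Patched behavior: fall back to an empty tag list and keep logging.
    tags = metadata.get("tags", [])
    assert tags == []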
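
Note on select_data_generator: the patch deletes the sync boto3/sagemaker branch (and the bare-except fallback), so the proxy now always streams through async_data_generator. The sketch below is a rough stand-in for that async streaming shape, under assumptions: the chunk source and the "data: ..." server-sent-events framing are illustrative, not the proxy's actual implementation.

    import asyncio

    async def fake_model_stream():
        # Hypothetical stand-in for a streaming model response.
        for chunk in ("Hello", ", world"):
            yield chunk

    async def sketch_async_data_generator(response):
        # Mirrors the role async_data_generator plays: wrap each chunk
        # in SSE framing and close the stream with a sentinel.
        async for chunk in response:
            yield f"data: {chunk}\n\n"
        yield "data: [DONE]\n\n"

    async def main():
        async for line in sketch_async_data_generator(fake_model_stream()):
            print(line, end="")

    asyncio.run(main())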