fix: fix bugs

Krrish Dholakia 2024-02-13 22:59:07 -08:00
parent 3ef391800a
commit f9ab7e7b14
2 changed files with 11 additions and 26 deletions


@@ -231,7 +231,7 @@ class LangFuseLogger:
         print_verbose(f"Langfuse Layer Logging - logging to langfuse v2 ")
         if supports_tags:
-            metadata_tags = metadata["tags"]
+            metadata_tags = metadata.get("tags", [])
             tags = metadata_tags
         generation_name = metadata.get("generation_name", None)
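A minimal sketch of what this hunk changes, using an illustrative metadata dict (the real payload shape comes from the caller): subscripting metadata["tags"] raises KeyError whenever no tags are passed, while metadata.get("tags", []) falls back to an empty list.

# Illustrative payload only; real callers build this metadata dict.
metadata_without_tags = {"generation_name": "summarize"}

# Old behavior: KeyError when "tags" is missing.
try:
    tags = metadata_without_tags["tags"]
except KeyError:
    tags = None  # logging would have failed here

# New behavior: degrade to an empty tag list.
tags = metadata_without_tags.get("tags", [])
assert tags == []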


@@ -166,9 +166,9 @@ class ProxyException(Exception):
 async def openai_exception_handler(request: Request, exc: ProxyException):
     # NOTE: DO NOT MODIFY THIS, its crucial to map to Openai exceptions
     return JSONResponse(
-        status_code=int(exc.code)
-        if exc.code
-        else status.HTTP_500_INTERNAL_SERVER_ERROR,
+        status_code=(
+            int(exc.code) if exc.code else status.HTTP_500_INTERNAL_SERVER_ERROR
+        ),
         content={
             "error": {
                 "message": exc.message,
@@ -1885,24 +1885,7 @@ async def async_data_generator(response, user_api_key_dict):
 def select_data_generator(response, user_api_key_dict):
-    try:
-        # since boto3 - sagemaker does not support async calls, we should use a sync data_generator
-        if hasattr(
-            response, "custom_llm_provider"
-        ) and response.custom_llm_provider in ["sagemaker"]:
-            return data_generator(
-                response=response,
-            )
-        else:
-            # default to async_data_generator
-            return async_data_generator(
-                response=response, user_api_key_dict=user_api_key_dict
-            )
-    except:
-        # worst case - use async_data_generator
-        return async_data_generator(
-            response=response, user_api_key_dict=user_api_key_dict
-        )
+    return async_data_generator(response=response, user_api_key_dict=user_api_key_dict)


 def get_litellm_model_info(model: dict = {}):
@@ -4483,9 +4466,11 @@ async def get_routes():
             "path": getattr(route, "path", None),
             "methods": getattr(route, "methods", None),
             "name": getattr(route, "name", None),
-            "endpoint": getattr(route, "endpoint", None).__name__
-            if getattr(route, "endpoint", None)
-            else None,
+            "endpoint": (
+                getattr(route, "endpoint", None).__name__
+                if getattr(route, "endpoint", None)
+                else None
+            ),
         }
         routes.append(route_info)
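For reference, a small sketch of the guard around the endpoint name; the route classes below are hypothetical stand-ins, not FastAPI's actual route types:

# Hypothetical stand-ins: one route with an endpoint callable, one without.
class RouteWithEndpoint:
    def endpoint(self):
        ...

class RouteWithoutEndpoint:
    pass

for route in (RouteWithEndpoint(), RouteWithoutEndpoint()):
    endpoint = getattr(route, "endpoint", None)
    # Only read __name__ when the endpoint exists, mirroring the hunk above.
    print(endpoint.__name__ if endpoint else None)  # "endpoint", then None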