Merge pull request #4571 from BerriAI/litellm_tts_pricing

feat(cost_calculator.py): support openai+azure tts calls
Krish Dholakia 2024-07-06 14:58:05 -07:00 committed by GitHub
commit 97c9c2fde7
4 changed files with 18 additions and 11 deletions
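
For context, this is roughly how the new pricing path gets exercised end to end. A minimal sketch, assuming litellm's public speech() and completion_cost() entry points; the call_type and prompt arguments are assumptions about the generic cost API, not taken from this diff:

    import litellm

    # Speech calls return binary audio (HttpxBinaryResponseContent),
    # which previously had no cost mapping in the calculator.
    response = litellm.speech(
        model="openai/tts-1",
        voice="alloy",
        input="the quick brown fox jumped over the lazy dog",
    )

    # With this change the calculator can price the call; OpenAI TTS is
    # billed per input character, hence _generic_cost_per_character.
    cost = litellm.completion_cost(
        model="openai/tts-1",
        prompt="the quick brown fox jumped over the lazy dog",
        call_type="speech",  # assumption: speech calls dispatch via CallTypes
    )
    print(f"tts cost: ${cost:.6f}")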

@@ -18,6 +18,7 @@ from litellm.litellm_core_utils.llm_cost_calc.google import (
 from litellm.litellm_core_utils.llm_cost_calc.utils import _generic_cost_per_character
 from litellm.types.llms.openai import HttpxBinaryResponseContent
 from litellm.types.router import SPECIAL_MODEL_INFO_PARAMS
+[the one added line in this hunk was not captured in this view]
 from litellm.utils import (
     CallTypes,
     CostPerToken,
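
HttpxBinaryResponseContent, imported here, is the binary type that speech endpoints return instead of a ModelResponse, which is what lets the cost calculator recognize TTS calls. A hypothetical sketch of that dispatch (the helper name is illustrative, not from this commit):

    from litellm.types.llms.openai import HttpxBinaryResponseContent

    def _is_tts_response(completion_response: object) -> bool:
        # Audio responses carry raw bytes, not token usage, so the
        # calculator must route them to character-based pricing.
        return isinstance(completion_response, HttpxBinaryResponseContent)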

@@ -3382,8 +3382,9 @@ async def embeddings(
         )
         verbose_proxy_logger.debug(traceback.format_exc())
         if isinstance(e, HTTPException):
+            message = get_error_message_str(e)
             raise ProxyException(
-                message=getattr(e, "message", str(e)),
+                message=message,
                 type=getattr(e, "type", "None"),
                 param=getattr(e, "param", "None"),
                 code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
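
The practical effect: a FastAPI HTTPException carries its payload in .detail, not .message, so the old getattr fallback returned the stringified exception rather than the payload. A small before/after sketch:

    from fastapi import HTTPException

    e = HTTPException(status_code=400, detail={"error": "invalid embedding input"})

    # Before: HTTPException has no `.message`, so the fallback str(e) is
    # what reached the client (status-code prefix and all).
    before = getattr(e, "message", str(e))

    # After: get_error_message_str(e) reads `.detail` and returns the
    # payload itself, JSON-encoded when it is a dict.
    # after = get_error_message_str(e)  # -> '{"error": "invalid embedding input"}'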

@@ -2888,6 +2888,11 @@ def get_error_message_str(e: Exception) -> str:
             error_message = e.detail
         elif isinstance(e.detail, dict):
             error_message = json.dumps(e.detail)
+        elif hasattr(e, "message"):
+            if isinstance(e.message, str):
+                error_message = e.message
+            elif isinstance(e.message, dict):
+                error_message = json.dumps(e.message)
         else:
             error_message = str(e)
     else:
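
With the new branch, the helper also covers exceptions that expose a .message attribute (common on provider SDK errors), not just HTTPException .detail. A self-contained restatement of the hunk's logic, runnable on its own:

    import json

    from fastapi import HTTPException

    def get_error_message_str_sketch(e: Exception) -> str:
        # Mirrors the branches visible in this diff.
        if isinstance(e, HTTPException):
            if isinstance(e.detail, str):
                return e.detail
            elif isinstance(e.detail, dict):
                return json.dumps(e.detail)
            elif hasattr(e, "message"):
                if isinstance(e.message, str):
                    return e.message
                elif isinstance(e.message, dict):
                    return json.dumps(e.message)
        return str(e)

    # Contrived setup: starlette normally fills `.detail` with the status
    # phrase, so clear it here to reach the new `.message` branch.
    e = HTTPException(status_code=429)
    e.detail = None
    e.message = {"code": 429, "error": "rate limited"}
    print(get_error_message_str_sketch(e))  # {"code": 429, "error": "rate limited"}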

@@ -7521,7 +7521,7 @@ def exception_type(
             if original_exception.status_code == 400:
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"{exception_provider} - {message}",
+                    message=f"{exception_provider} - {error_str}",
                     llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
@@ -7530,7 +7530,7 @@
             elif original_exception.status_code == 401:
                 exception_mapping_worked = True
                 raise AuthenticationError(
-                    message=f"AuthenticationError: {exception_provider} - {message}",
+                    message=f"AuthenticationError: {exception_provider} - {error_str}",
                     llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
@@ -7539,7 +7539,7 @@
             elif original_exception.status_code == 404:
                 exception_mapping_worked = True
                 raise NotFoundError(
-                    message=f"NotFoundError: {exception_provider} - {message}",
+                    message=f"NotFoundError: {exception_provider} - {error_str}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     response=original_exception.response,
@@ -7548,7 +7548,7 @@
             elif original_exception.status_code == 408:
                 exception_mapping_worked = True
                 raise Timeout(
-                    message=f"Timeout Error: {exception_provider} - {message}",
+                    message=f"Timeout Error: {exception_provider} - {error_str}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     litellm_debug_info=extra_information,
@@ -7556,7 +7556,7 @@
             elif original_exception.status_code == 422:
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"BadRequestError: {exception_provider} - {message}",
+                    message=f"BadRequestError: {exception_provider} - {error_str}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     response=original_exception.response,
@@ -7565,7 +7565,7 @@
             elif original_exception.status_code == 429:
                 exception_mapping_worked = True
                 raise RateLimitError(
-                    message=f"RateLimitError: {exception_provider} - {message}",
+                    message=f"RateLimitError: {exception_provider} - {error_str}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     response=original_exception.response,
@@ -7574,7 +7574,7 @@
             elif original_exception.status_code == 503:
                 exception_mapping_worked = True
                 raise ServiceUnavailableError(
-                    message=f"ServiceUnavailableError: {exception_provider} - {message}",
+                    message=f"ServiceUnavailableError: {exception_provider} - {error_str}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     response=original_exception.response,
@@ -7583,7 +7583,7 @@
             elif original_exception.status_code == 504:  # gateway timeout error
                 exception_mapping_worked = True
                 raise Timeout(
-                    message=f"Timeout Error: {exception_provider} - {message}",
+                    message=f"Timeout Error: {exception_provider} - {error_str}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     litellm_debug_info=extra_information,
@@ -7592,7 +7592,7 @@
                 exception_mapping_worked = True
                 raise APIError(
                     status_code=original_exception.status_code,
-                    message=f"APIError: {exception_provider} - {message}",
+                    message=f"APIError: {exception_provider} - {error_str}",
                     llm_provider=custom_llm_provider,
                     model=model,
                     request=original_exception.request,
@@ -7601,7 +7601,7 @@
         else:
             # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
             raise APIConnectionError(
-                message=f"APIConnectionError: {exception_provider} - {message}",
+                message=f"APIConnectionError: {exception_provider} - {error_str}",
                 llm_provider=custom_llm_provider,
                 model=model,
                 litellm_debug_info=extra_information,
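
Every branch above gets the same one-token fix: the raised exception should interpolate error_str, the text extracted from the upstream provider error, rather than the stale message variable. A condensed, hypothetical restatement of the mapping (names other than error_str and exception_provider are illustrative):

    # Status codes mapped to the message prefixes used in the branches above.
    STATUS_TO_PREFIX = {
        400: "",  # plain BadRequestError, no prefix
        401: "AuthenticationError: ",
        404: "NotFoundError: ",
        408: "Timeout Error: ",
        422: "BadRequestError: ",
        429: "RateLimitError: ",
        503: "ServiceUnavailableError: ",
        504: "Timeout Error: ",  # gateway timeout
    }

    def build_error_message(status_code: int, exception_provider: str, error_str: str) -> str:
        prefix = STATUS_TO_PREFIX.get(status_code, "APIError: ")
        return f"{prefix}{exception_provider} - {error_str}"

    assert build_error_message(429, "openai", "rate limit hit") == (
        "RateLimitError: openai - rate limit hit"
    )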