diff --git a/litellm/proxy/hooks/parallel_request_limiter.py b/litellm/proxy/hooks/parallel_request_limiter.py
index 0a38e5eded..30877daf39 100644
--- a/litellm/proxy/hooks/parallel_request_limiter.py
+++ b/litellm/proxy/hooks/parallel_request_limiter.py
@@ -186,4 +186,4 @@ class MaxParallelRequestsHandler(CustomLogger):
                 request_count_api_key, new_val, ttl=60
             )  # save in cache for up to 1 min.
         except Exception as e:
-            self.print_verbose(f"An exception occurred - {str(e)}")  # noqa
+            print(f"An exception occurred - {str(e)}")  # noqa
diff --git a/litellm/utils.py b/litellm/utils.py
index dbfbf46ecb..325139d9d4 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -773,7 +773,7 @@ class Logging:
         self.model = model
         self.user = user
         self.litellm_params = litellm_params
-        self.logger_fn = litellm_params["logger_fn"]
+        self.logger_fn = litellm_params.get("logger_fn", None)
         print_verbose(f"self.optional_params: {self.optional_params}")
         self.model_call_details = {
             "model": self.model,
@@ -1941,6 +1941,15 @@ def client(original_function):
                 call_type=call_type,
                 start_time=start_time,
             )
+            ## check if metadata is passed in
+            if "metadata" in kwargs:
+                litellm_params = {"metadata": kwargs["metadata"]}
+                logging_obj.update_environment_variables(
+                    model=model,
+                    user="",
+                    optional_params={},
+                    litellm_params=litellm_params,
+                )
             return logging_obj
         except Exception as e:
             import logging
@@ -5731,15 +5740,6 @@ def exception_type(
                             model=model,
                             llm_provider="openai",
                         )
-                    else:
-                        exception_mapping_worked = True
-                        raise APIError(
-                            status_code=original_exception.status_code,
-                            message=f"OpenAIException - {original_exception.message}",
-                            llm_provider="openai",
-                            model=model,
-                            request=original_exception.request,
-                        )
                 else:
                     # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
                     raise APIConnectionError(
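
A minimal sketch (not part of the diff) of how the new `metadata` hunk in `client()` would be exercised, assuming the `metadata` keyword passed to `litellm.completion()` reaches the kwargs inspected by the wrapper and is then copied into the `Logging` object's `litellm_params`. The key names inside the metadata dict are illustrative only.

```python
# Illustrative usage, not part of the PR.
# Assumes OPENAI_API_KEY is set and that `metadata` flows through to the
# `if "metadata" in kwargs` branch added in litellm/utils.py above.
import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
    metadata={"request_id": "abc-123", "team": "billing"},  # hypothetical keys
)
```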