diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 01887616c..da89b7796 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -718,8 +718,22 @@ class OpenAIChatCompletion(BaseLLM):
             return convert_to_model_response_object(response_object=response, model_response_object=model_response, response_type="image_generation")  # type: ignore
         except OpenAIError as e:
             exception_mapping_worked = True
+            ## LOGGING
+            logging_obj.post_call(
+                input=prompt,
+                api_key=api_key,
+                additional_args={"complete_input_dict": data},
+                original_response=str(e),
+            )
             raise e
         except Exception as e:
+            ## LOGGING
+            logging_obj.post_call(
+                input=prompt,
+                api_key=api_key,
+                additional_args={"complete_input_dict": data},
+                original_response=str(e),
+            )
             if hasattr(e, "status_code"):
                 raise OpenAIError(status_code=e.status_code, message=str(e))
             else:
diff --git a/litellm/main.py b/litellm/main.py
index df5982c9c..b6fb15f1a 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -3076,7 +3076,7 @@ def image_generation(
             custom_llm_provider=custom_llm_provider,
             **non_default_params,
         )
-        logging = litellm_logging_obj
+        logging: Logging = litellm_logging_obj
         logging.update_environment_variables(
             model=model,
             user=user,
diff --git a/litellm/tests/test_custom_callback_input.py b/litellm/tests/test_custom_callback_input.py
index a61cc843e..266303df1 100644
--- a/litellm/tests/test_custom_callback_input.py
+++ b/litellm/tests/test_custom_callback_input.py
@@ -819,47 +819,49 @@ async def test_async_embedding_azure_caching():
 # Image Generation
 
-# ## Test OpenAI + Sync
-# def test_image_generation_openai():
-#     try:
-#         customHandler_success = CompletionCustomHandler()
-#         customHandler_failure = CompletionCustomHandler()
-#         litellm.callbacks = [customHandler_success]
+## Test OpenAI + Sync
+def test_image_generation_openai():
+    try:
+        customHandler_success = CompletionCustomHandler()
+        customHandler_failure = CompletionCustomHandler()
+        # litellm.callbacks = [customHandler_success]
 
-#         litellm.set_verbose = True
+        # litellm.set_verbose = True
 
-#         response = litellm.image_generation(
-#             prompt="A cute baby sea otter", model="dall-e-3"
-#         )
+        # response = litellm.image_generation(
+        #     prompt="A cute baby sea otter", model="dall-e-3"
+        # )
 
-#         print(f"response: {response}")
-#         assert len(response.data) > 0
+        # print(f"response: {response}")
+        # assert len(response.data) > 0
 
-#         print(f"customHandler_success.errors: {customHandler_success.errors}")
-#         print(f"customHandler_success.states: {customHandler_success.states}")
-#         assert len(customHandler_success.errors) == 0
-#         assert len(customHandler_success.states) == 3  # pre, post, success
-#         # test failure callback
-#         litellm.callbacks = [customHandler_failure]
-#         try:
-#             response = litellm.image_generation(
-#                 prompt="A cute baby sea otter", model="dall-e-4"
-#             )
-#         except:
-#             pass
-#         print(f"customHandler_failure.errors: {customHandler_failure.errors}")
-#         print(f"customHandler_failure.states: {customHandler_failure.states}")
-#         assert len(customHandler_failure.errors) == 0
-#         assert len(customHandler_failure.states) == 3  # pre, post, failure
-#     except litellm.RateLimitError as e:
-#         pass
-#     except litellm.ContentPolicyViolationError:
-#         pass  # OpenAI randomly raises these errors - skip when they occur
-#     except Exception as e:
-#         pytest.fail(f"An exception occurred - {str(e)}")
+        # print(f"customHandler_success.errors: {customHandler_success.errors}")
+        # print(f"customHandler_success.states: {customHandler_success.states}")
+        # assert len(customHandler_success.errors) == 0
+        # assert len(customHandler_success.states) == 3  # pre, post, success
+        # test failure callback
+        litellm.callbacks = [customHandler_failure]
+        try:
+            response = litellm.image_generation(
+                prompt="A cute baby sea otter",
+                model="dall-e-2",
+                api_key="my-bad-api-key",
+            )
+        except:
+            pass
+        print(f"customHandler_failure.errors: {customHandler_failure.errors}")
+        print(f"customHandler_failure.states: {customHandler_failure.states}")
+        assert len(customHandler_failure.errors) == 0
+        assert len(customHandler_failure.states) == 3  # pre, post, failure
+    except litellm.RateLimitError as e:
+        pass
+    except litellm.ContentPolicyViolationError:
+        pass  # OpenAI randomly raises these errors - skip when they occur
+    except Exception as e:
+        pytest.fail(f"An exception occurred - {str(e)}")
 
-# test_image_generation_openai()
+test_image_generation_openai()
 
 ## Test OpenAI + Async
 
 
 ## Test Azure + Sync
diff --git a/litellm/utils.py b/litellm/utils.py
index d8f535ca4..ed86f2fae 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2030,14 +2030,15 @@ def client(original_function):
                     start_time=start_time,
                 )
                 ## check if metadata is passed in
+                litellm_params = {}
                 if "metadata" in kwargs:
-                    litellm_params = {"metadata": kwargs["metadata"]}
-                    logging_obj.update_environment_variables(
-                        model=model,
-                        user="",
-                        optional_params={},
-                        litellm_params=litellm_params,
-                    )
+                    litellm_params["metadata"] = kwargs["metadata"]
+                logging_obj.update_environment_variables(
+                    model=model,
+                    user="",
+                    optional_params={},
+                    litellm_params=litellm_params,
+                )
                 return logging_obj
             except Exception as e:
                 import logging
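The re-enabled test above exercises the new `post_call` logging added to `litellm/llms/openai.py` by forcing `image_generation` to fail. A minimal standalone sketch of the same flow, assuming litellm's `CustomLogger` callback interface (`log_pre_api_call` / `log_post_api_call` / `log_failure_event`); the handler class name and the bad API key are illustrative placeholders:

```python
# Sketch: register a failure-aware callback, then force image_generation to fail
# so the patched exception handlers call logging_obj.post_call and the failure
# callback fires. ImageGenFailureHandler and the API key are placeholders.
import litellm
from litellm.integrations.custom_logger import CustomLogger


class ImageGenFailureHandler(CustomLogger):
    def __init__(self):
        super().__init__()
        self.states = []

    def log_pre_api_call(self, model, messages, kwargs):
        self.states.append("pre_api_call")

    def log_post_api_call(self, kwargs, response_obj, start_time, end_time):
        self.states.append("post_api_call")

    def log_failure_event(self, kwargs, response_obj, start_time, end_time):
        self.states.append("failure")


handler = ImageGenFailureHandler()
litellm.callbacks = [handler]

try:
    litellm.image_generation(
        prompt="A cute baby sea otter",
        model="dall-e-2",
        api_key="my-bad-api-key",  # invalid key -> OpenAIError -> post_call + failure callback
    )
except Exception:
    pass

print(handler.states)  # expected: ['pre_api_call', 'post_api_call', 'failure']
```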