Merge pull request #1646 from BerriAI/litellm_image_gen_cost_tracking_proxy

Litellm image gen cost tracking proxy

Commit: ba4089824d
4 changed files with 60 additions and 43 deletions
@@ -718,8 +718,22 @@ class OpenAIChatCompletion(BaseLLM):
             return convert_to_model_response_object(response_object=response, model_response_object=model_response, response_type="image_generation")  # type: ignore
         except OpenAIError as e:
             exception_mapping_worked = True
+            ## LOGGING
+            logging_obj.post_call(
+                input=prompt,
+                api_key=api_key,
+                additional_args={"complete_input_dict": data},
+                original_response=str(e),
+            )
             raise e
         except Exception as e:
+            ## LOGGING
+            logging_obj.post_call(
+                input=prompt,
+                api_key=api_key,
+                additional_args={"complete_input_dict": data},
+                original_response=str(e),
+            )
             if hasattr(e, "status_code"):
                 raise OpenAIError(status_code=e.status_code, message=str(e))
             else:
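The whole change in this hunk is the two added `logging_obj.post_call` blocks: both exception paths now report the raw failure before re-raising, so the proxy's cost/usage tracking sees failed image-generation calls as well as successful ones. A minimal sketch of the pattern, with a hypothetical `SimpleLogger` standing in for litellm's real logging object (which tracks far more state):

# Sketch of the post_call-on-failure pattern from the hunk above.
# SimpleLogger is hypothetical, not litellm's actual logging class.
class SimpleLogger:
    def post_call(self, input, api_key, additional_args, original_response):
        # A cost/usage tracker can inspect every outcome, including errors.
        print(f"post_call: input={input!r} response={original_response!r}")

def generate_image(prompt, api_key, call, logging_obj):
    data = {"prompt": prompt}
    try:
        response = call(**data)  # stands in for the provider SDK request
        logging_obj.post_call(
            input=prompt,
            api_key=api_key,
            additional_args={"complete_input_dict": data},
            original_response=response,
        )
        return response
    except Exception as e:
        # Mirror the diff: log the failure, then re-raise for the caller.
        logging_obj.post_call(
            input=prompt,
            api_key=api_key,
            additional_args={"complete_input_dict": data},
            original_response=str(e),
        )
        raise

# A failing call still produces a post_call record before the exception surfaces:
# generate_image("otter", "sk-...", call=lambda **kw: 1 / 0, logging_obj=SimpleLogger())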
@@ -3076,7 +3076,7 @@ def image_generation(
         custom_llm_provider=custom_llm_provider,
         **non_default_params,
     )
-    logging = litellm_logging_obj
+    logging: Logging = litellm_logging_obj
     logging.update_environment_variables(
         model=model,
         user=user,
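The one-line change here annotates the local as `logging: Logging` instead of letting its type be inferred from `litellm_logging_obj`, which is presumably typed loosely at the function boundary. The annotation hands type checkers and editors the concrete interface for the calls that follow. A toy illustration of the difference, with hypothetical names:

from typing import Any, Optional

class Logging:
    def update_environment_variables(self, model: str, user: str) -> None:
        pass

def get_logging_obj() -> Optional[Any]:  # loosely typed factory, as assumed here
    return Logging()

logging_a = get_logging_obj()           # inferred Optional[Any]: no completion, no checks
logging_b: Logging = get_logging_obj()  # checker now knows update_environment_variables
logging_b.update_environment_variables(model="dall-e-3", user="")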
@@ -819,47 +819,49 @@ async def test_async_embedding_azure_caching():
 # Image Generation


-# ## Test OpenAI + Sync
-# def test_image_generation_openai():
-#     try:
-#         customHandler_success = CompletionCustomHandler()
-#         customHandler_failure = CompletionCustomHandler()
-#         litellm.callbacks = [customHandler_success]
+## Test OpenAI + Sync
+def test_image_generation_openai():
+    try:
+        customHandler_success = CompletionCustomHandler()
+        customHandler_failure = CompletionCustomHandler()
+        # litellm.callbacks = [customHandler_success]

-#         litellm.set_verbose = True
+        # litellm.set_verbose = True

-#         response = litellm.image_generation(
-#             prompt="A cute baby sea otter", model="dall-e-3"
-#         )
+        # response = litellm.image_generation(
+        #     prompt="A cute baby sea otter", model="dall-e-3"
+        # )

-#         print(f"response: {response}")
-#         assert len(response.data) > 0
+        # print(f"response: {response}")
+        # assert len(response.data) > 0

-#         print(f"customHandler_success.errors: {customHandler_success.errors}")
-#         print(f"customHandler_success.states: {customHandler_success.states}")
-#         assert len(customHandler_success.errors) == 0
-#         assert len(customHandler_success.states) == 3  # pre, post, success
-#         # test failure callback
-#         litellm.callbacks = [customHandler_failure]
-#         try:
-#             response = litellm.image_generation(
-#                 prompt="A cute baby sea otter", model="dall-e-4"
-#             )
-#         except:
-#             pass
-#         print(f"customHandler_failure.errors: {customHandler_failure.errors}")
-#         print(f"customHandler_failure.states: {customHandler_failure.states}")
-#         assert len(customHandler_failure.errors) == 0
-#         assert len(customHandler_failure.states) == 3  # pre, post, failure
-#     except litellm.RateLimitError as e:
-#         pass
-#     except litellm.ContentPolicyViolationError:
-#         pass  # OpenAI randomly raises these errors - skip when they occur
-#     except Exception as e:
-#         pytest.fail(f"An exception occurred - {str(e)}")
+        # print(f"customHandler_success.errors: {customHandler_success.errors}")
+        # print(f"customHandler_success.states: {customHandler_success.states}")
+        # assert len(customHandler_success.errors) == 0
+        # assert len(customHandler_success.states) == 3  # pre, post, success
+        # test failure callback
+        litellm.callbacks = [customHandler_failure]
+        try:
+            response = litellm.image_generation(
+                prompt="A cute baby sea otter",
+                model="dall-e-2",
+                api_key="my-bad-api-key",
+            )
+        except:
+            pass
+        print(f"customHandler_failure.errors: {customHandler_failure.errors}")
+        print(f"customHandler_failure.states: {customHandler_failure.states}")
+        assert len(customHandler_failure.errors) == 0
+        assert len(customHandler_failure.states) == 3  # pre, post, failure
+    except litellm.RateLimitError as e:
+        pass
+    except litellm.ContentPolicyViolationError:
+        pass  # OpenAI randomly raises these errors - skip when they occur
+    except Exception as e:
+        pytest.fail(f"An exception occurred - {str(e)}")


-# test_image_generation_openai()
+test_image_generation_openai()
 ## Test OpenAI + Async

 ## Test Azure + Sync
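This hunk re-enables the previously commented-out test, but only its failure path stays live: the success-path assertions remain commented, while a deliberately bad API key forces an error and the test asserts the handler recorded exactly three states (pre-call, post-call, failure) and no internal errors. A condensed sketch of the bookkeeping such a handler performs; `MiniHandler` is hypothetical, the method names follow litellm's CustomLogger hooks as assumed here, and the real `CompletionCustomHandler` in litellm's test helpers validates much more:

# Hypothetical MiniHandler showing the state tracking the assertions rely on.
class MiniHandler:
    def __init__(self):
        self.states = []  # expected to end as ["pre_api_call", "post_api_call", "failure"]
        self.errors = []  # only populated if the handler itself hits a bug

    def log_pre_api_call(self, model, messages, kwargs):
        self.states.append("pre_api_call")

    def log_post_api_call(self, kwargs, response_obj, start_time, end_time):
        self.states.append("post_api_call")

    def log_failure_event(self, kwargs, response_obj, start_time, end_time):
        self.states.append("failure")

After a failed call, `len(handler.states) == 3` confirms every stage of the callback lifecycle fired exactly once, which is what the test's final assertions check.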
@@ -2030,8 +2030,9 @@ def client(original_function):
                     start_time=start_time,
                 )
                 ## check if metadata is passed in
+                litellm_params = {}
                 if "metadata" in kwargs:
-                    litellm_params = {"metadata": kwargs["metadata"]}
+                    litellm_params["metadata"] = kwargs["metadata"]
                 logging_obj.update_environment_variables(
                     model=model,
                     user="",
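This hunk binds `litellm_params` unconditionally before the `metadata` check, so code further down that reads it (presumably the `logging_obj.update_environment_variables` call) gets an empty dict instead of an unbound name when the caller passes no metadata. A minimal reproduction of the difference between the two versions:

def before(**kwargs):
    if "metadata" in kwargs:
        litellm_params = {"metadata": kwargs["metadata"]}
    return litellm_params  # NameError when "metadata" was not passed

def after(**kwargs):
    litellm_params = {}  # always bound, as in the diff
    if "metadata" in kwargs:
        litellm_params["metadata"] = kwargs["metadata"]
    return litellm_params  # {} when no metadata, so downstream code is safe

after()                   # -> {}
after(metadata={"a": 1})  # -> {"metadata": {"a": 1}}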