diff --git a/litellm/cost_calculator.py b/litellm/cost_calculator.py
index a0645c19a4..a66a800026 100644
--- a/litellm/cost_calculator.py
+++ b/litellm/cost_calculator.py
@@ -24,7 +24,7 @@ from litellm.llms.anthropic.cost_calculation import (
 )
 from litellm.types.llms.openai import HttpxBinaryResponseContent
 from litellm.types.router import SPECIAL_MODEL_INFO_PARAMS
-from litellm.types.utils import Usage
+from litellm.types.utils import PassthroughCallTypes, Usage
 from litellm.utils import (
     CallTypes,
     CostPerToken,
@@ -625,6 +625,7 @@ def completion_cost(
         if (
             call_type == CallTypes.image_generation.value
             or call_type == CallTypes.aimage_generation.value
+            or call_type == PassthroughCallTypes.passthrough_image_generation.value
         ):
             ### IMAGE GENERATION COST CALCULATION ###
             if custom_llm_provider == "vertex_ai":
diff --git a/litellm/proxy/pass_through_endpoints/success_handler.py b/litellm/proxy/pass_through_endpoints/success_handler.py
index 39f1d14ab7..f29129df1e 100644
--- a/litellm/proxy/pass_through_endpoints/success_handler.py
+++ b/litellm/proxy/pass_through_endpoints/success_handler.py
@@ -110,6 +110,7 @@ class PassThroughEndpointLogging:
             from litellm.llms.vertex_ai_and_google_ai_studio.vertex_embeddings.embedding_handler import (
                 transform_vertex_response_to_openai,
             )
+            from litellm.types.utils import PassthroughCallTypes
 
             vertex_image_generation_class = VertexImageGeneration()
 
@@ -127,6 +128,10 @@ class PassThroughEndpointLogging:
                         model=model,
                     )
                 )
+
+                logging_obj.call_type = (
+                    PassthroughCallTypes.passthrough_image_generation.value
+                )
             else:
                 litellm_model_response = await transform_vertex_response_to_openai(
                     response=_json_response,
diff --git a/litellm/tests/test_get_llm_provider.py b/litellm/tests/test_get_llm_provider.py
index 4eef036a70..8f585a0720 100644
--- a/litellm/tests/test_get_llm_provider.py
+++ b/litellm/tests/test_get_llm_provider.py
@@ -72,6 +72,6 @@ def test_get_llm_provider_deepseek_custom_api_base():
 
 
 def test_get_llm_provider_vertex_ai_image_models():
     model, custom_llm_provider, dynamic_api_key, api_base = litellm.get_llm_provider(
-        model="imagegeneration@006",
+        model="imagegeneration@006", custom_llm_provider=None
     )
     assert custom_llm_provider == "vertex_ai"
diff --git a/litellm/types/utils.py b/litellm/types/utils.py
index 9e8c7be34f..d649a30f07 100644
--- a/litellm/types/utils.py
+++ b/litellm/types/utils.py
@@ -119,6 +119,10 @@ class CallTypes(Enum):
     speech = "speech"
 
 
+class PassthroughCallTypes(Enum):
+    passthrough_image_generation = "passthrough-image-generation"
+
+
 class TopLogprob(OpenAIObject):
     token: str
     """The token."""