Sian Cao 2025-04-24 15:35:03 +08:00 committed by GitHub
commit 821e8b327c
2 changed files with 39 additions and 0 deletions


@@ -949,6 +949,8 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
             ) = self._process_candidates(
                 _candidates, model_response, litellm_params
             )
+        else:
+            model_response.choices.append(litellm.Choices())
 
         usage = self._calculate_usage(completion_response=completion_response)
         setattr(model_response, "usage", usage)
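
The guard added above covers Gemini responses that contain no "candidates" at all (for example, a reply carrying only usageMetadata): instead of leaving model_response.choices empty, the transform appends a default litellm.Choices() so callers can still index choices[0]. The sketch below is illustrative only and is not litellm's implementation; the Choice/Response dataclasses and the transform helper are hypothetical stand-ins used to show the same guard pattern.

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Choice:
    # Hypothetical stand-in for litellm.Choices: an empty assistant turn by default.
    text: Optional[str] = None
    finish_reason: str = "stop"

@dataclass
class Response:
    # Hypothetical stand-in for litellm.ModelResponse.
    choices: List[Choice] = field(default_factory=list)

def transform(completion_response: dict) -> Response:
    """Mirror the patched branch: always return at least one choice."""
    response = Response()
    candidates = completion_response.get("candidates")
    if candidates:
        for candidate in candidates:
            response.choices.append(
                Choice(text=candidate["content"]["parts"][0]["text"])
            )
    else:
        # No candidates (e.g. the payload only has usageMetadata):
        # append one empty choice rather than returning choices == [].
        response.choices.append(Choice())
    return response

# A usage-only payload, like the one exercised by the new test below,
# still yields exactly one (empty) choice.
assert len(transform({"usageMetadata": {"totalTokenCount": 9291}}).choices) == 1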


@@ -310,3 +310,40 @@ def test_vertex_ai_candidate_token_count_inclusive(
     assert usage.prompt_tokens == expected_usage.prompt_tokens
     assert usage.completion_tokens == expected_usage.completion_tokens
     assert usage.total_tokens == expected_usage.total_tokens
+
+
+def test_empty_candidates_response():
+    # A response carrying only usage metadata (no "candidates") should still yield one empty choice.
+    from unittest.mock import MagicMock
+
+    import httpx
+    import litellm
+    from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
+        VertexGeminiConfig,
+    )
+
+    model_response = litellm.ModelResponse()
+    completion_response = {
+        "usageMetadata": {
+            "promptTokenCount": 9291,
+            "totalTokenCount": 9291,
+            "promptTokensDetails": [{"modality": "TEXT", "tokenCount": 9291}],
+        },
+        "modelVersion": "gemini-2.5-pro-preview-03-25",
+    }
+    raw_response = httpx.Response(200, json=completion_response)
+
+    config = VertexGeminiConfig()
+    result = config.transform_response(
+        model="gemini-2.5-pro-preview-03-25",
+        raw_response=raw_response,
+        model_response=model_response,
+        logging_obj=MagicMock(),
+        request_data={},
+        messages=[],
+        optional_params={},
+        litellm_params={},
+        encoding=None,
+    )
+    assert len(result.choices) == 1
+    assert isinstance(result.choices[0], litellm.Choices)