Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
fix(vertex_ai.py): map finish reason
This commit is contained in:
parent: e7666d3908
commit: fe1fe70c64
3 changed files with 12 additions and 7 deletions
@@ -4,7 +4,7 @@ from enum import Enum
 import requests
 import time
 from typing import Callable, Optional, Union
-from litellm.utils import ModelResponse, Usage, CustomStreamWrapper
+from litellm.utils import ModelResponse, Usage, CustomStreamWrapper, map_finish_reason
 import litellm, uuid
 import httpx
 
@@ -575,9 +575,9 @@ def completion(
         model_response["model"] = model
         ## CALCULATING USAGE
         if model in litellm.vertex_language_models and response_obj is not None:
-            model_response["choices"][0].finish_reason = response_obj.candidates[
-                0
-            ].finish_reason.name
+            model_response["choices"][0].finish_reason = map_finish_reason(
+                response_obj.candidates[0].finish_reason.name
+            )
             usage = Usage(
                 prompt_tokens=response_obj.usage_metadata.prompt_token_count,
                 completion_tokens=response_obj.usage_metadata.candidates_token_count,
@@ -771,9 +771,9 @@ async def async_completion(
         model_response["model"] = model
         ## CALCULATING USAGE
         if model in litellm.vertex_language_models and response_obj is not None:
-            model_response["choices"][0].finish_reason = response_obj.candidates[
-                0
-            ].finish_reason.name
+            model_response["choices"][0].finish_reason = map_finish_reason(
+                response_obj.candidates[0].finish_reason.name
+            )
             usage = Usage(
                 prompt_tokens=response_obj.usage_metadata.prompt_token_count,
                 completion_tokens=response_obj.usage_metadata.candidates_token_count,
|
Loading…
Add table
Add a link
Reference in a new issue