forked from phoenix/litellm-mirror
fix(vertex_httpx.py): add better debug logging for vertex httpx
parent d880fb2619
commit aef5cf3f22

1 changed file with 52 additions and 41 deletions
@@ -366,8 +366,12 @@ class VertexLLM(BaseLLM):
         ## GET MODEL ##
         model_response.model = model

         try:
             ## GET TEXT ##
-            chat_completion_message: ChatCompletionResponseMessage = {"role": "assistant"}
+            chat_completion_message: ChatCompletionResponseMessage = {
+                "role": "assistant"
+            }
             content_str = ""
             tools: List[ChatCompletionToolCallChunk] = []
             for idx, candidate in enumerate(completion_response["candidates"]):

@@ -414,6 +418,13 @@ class VertexLLM(BaseLLM):
             )

             setattr(model_response, "usage", usage)
+        except Exception as e:
+            raise VertexAIError(
+                message="Received={}, Error converting to valid response block={}. File an issue if litellm error - https://github.com/BerriAI/litellm/issues".format(
+                    completion_response, str(e)
+                ),
+                status_code=422,
+            )

         return model_response
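For context, here is a minimal standalone sketch of the error-wrapping pattern the second hunk introduces. VertexAIError's real definition lives elsewhere in litellm; the stand-in below only mirrors the two fields the hunk passes (message, status_code), and parse_candidates is a hypothetical parser, not litellm's actual response-conversion code:

# Minimal sketch, assuming a simplified VertexAIError and parser.
class VertexAIError(Exception):
    def __init__(self, status_code: int, message: str):
        self.status_code = status_code
        self.message = message
        super().__init__(message)


def parse_candidates(completion_response: dict) -> str:
    # Hypothetical stand-in for the response conversion done in the try:
    # block above; a malformed payload raises KeyError/IndexError/TypeError.
    try:
        return completion_response["candidates"][0]["content"]["parts"][0]["text"]
    except Exception as e:
        # Re-raise with the raw payload attached, as the hunk does, so the
        # failing response is visible directly in the error message.
        raise VertexAIError(
            message="Received={}, Error converting to valid response block={}. File an issue if litellm error - https://github.com/BerriAI/litellm/issues".format(
                completion_response, str(e)
            ),
            status_code=422,
        )

Echoing the raw completion_response and raising with status_code=422 (unprocessable payload) appears to be the "better debug logging" the commit message refers to: a malformed Vertex response now fails with the offending payload in the error text rather than with a bare exception.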