forked from phoenix/litellm-mirror
fix(vertex_ai.py): parse out text response from response object
This commit is contained in:
parent
d244978247
commit
b0842e328c
2 changed files with 5 additions and 3 deletions
|
@@ -112,7 +112,7 @@ def completion(
|
|||
model_response = chat.send_message_streaming(prompt, **optional_params)
|
||||
return model_response
|
||||
|
||||
completion_response = chat.send_message(prompt, **optional_params)
|
||||
completion_response = chat.send_message(prompt, **optional_params).text
|
||||
elif mode == "text":
|
||||
## LOGGING
|
||||
logging_obj.pre_call(input=prompt, api_key=None)
|
||||
|
|
|
@@ -1026,12 +1026,14 @@ def test_completion_sagemaker():
|
|||
# test_completion_custom_api_base()
|
||||
|
||||
# def test_vertex_ai():
|
||||
# test_models = litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models
|
||||
# # test_models = litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models
|
||||
# test_models = ["chat-bison"]
|
||||
# for model in test_models:
|
||||
# try:
|
||||
# print("making request", model)
|
||||
# response = completion(model=model, messages=[{"role": "user", "content": "write code for saying hi"}])
|
||||
# response = completion(model="chat-bison", messages=[{'role': 'user', 'content': 'hi'}])
|
||||
# print(response)
|
||||
# assert type(response.choices[0].message.content) == str
|
||||
# except Exception as e:
|
||||
# pytest.fail(f"Error occurred: {e}")
|
||||
# test_vertex_ai()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue