(test) Added completion and embedding tests for watsonx provider
parent f9a7456eaa
commit 9fc30e8b31
3 changed files with 73 additions and 0 deletions
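For reviewers who want to exercise the new tests locally, a minimal sketch using pytest's programmatic entry point; the test-directory path and the watsonx credentials you must have exported beforehand are assumptions, not part of this diff:

# Run only the watsonx tests; -k selects tests whose names match the expression.
# Assumes you are in the repo root with the usual litellm test dependencies installed.
import pytest

pytest.main(["-v", "-k", "watsonx", "litellm/tests/"])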
@@ -2565,6 +2565,41 @@ def test_completion_palm_stream():
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


def test_completion_watsonx():
    litellm.set_verbose = True
    model_name = "watsonx/ibm/granite-13b-chat-v2"
    try:
        response = completion(
            model=model_name,
            messages=messages,
            stop=["stop"],
            max_tokens=20,
        )
        # Add any assertions here to check the response
        print(response)
    except litellm.APIError as e:
        pass
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


@pytest.mark.asyncio
async def test_acompletion_watsonx():
    litellm.set_verbose = True
    model_name = "watsonx/deployment/" + os.getenv("WATSONX_DEPLOYMENT_ID")
    print("testing watsonx")
    try:
        response = await litellm.acompletion(
            model=model_name,
            messages=messages,
            temperature=0.2,
            max_tokens=80,
            space_id=os.getenv("WATSONX_SPACE_ID_TEST"),
        )
        # Add any assertions here to check the response
        print(response)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


# test_completion_palm_stream()
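The same call can be tried outside of pytest. A minimal sketch, assuming the watsonx credentials the provider reads (endpoint URL, API key, and a project or space id) are already exported as environment variables; their exact names are not shown in this diff:

import litellm

# Same model-string format the tests use: watsonx/<provider>/<model-id>.
response = litellm.completion(
    model="watsonx/ibm/granite-13b-chat-v2",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    max_tokens=20,
)
# litellm responses follow the OpenAI shape.
print(response.choices[0].message.content)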
@@ -483,6 +483,18 @@ def test_mistral_embeddings():
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


def test_watsonx_embeddings():
    try:
        litellm.set_verbose = True
        response = litellm.embedding(
            model="watsonx/ibm/slate-30m-english-rtrvr",
            input=["good morning from litellm"],
        )
        print(f"response: {response}")
        assert isinstance(response.usage, litellm.Usage)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


# test_mistral_embeddings()
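The assertion above only checks response.usage; to inspect the vector itself, something like the following should work, assuming litellm's usual OpenAI-compatible embedding response shape (a sketch, not part of this diff):

import litellm

response = litellm.embedding(
    model="watsonx/ibm/slate-30m-english-rtrvr",
    input=["good morning from litellm"],
)
# OpenAI-style shape: response.data has one entry per input string.
vector = response.data[0]["embedding"]
print(len(vector))  # embedding dimensionality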
@@ -1210,6 +1210,32 @@ def test_completion_sagemaker_stream():
        pytest.fail(f"Error occurred: {e}")


def test_completion_watsonx_stream():
    litellm.set_verbose = True
    try:
        response = completion(
            model="watsonx/ibm/granite-13b-chat-v2",
            messages=messages,
            temperature=0.5,
            max_tokens=20,
            stream=True,
        )
        complete_response = ""
        has_finish_reason = False
        # Add any assertions here to check the response
        for idx, chunk in enumerate(response):
            chunk, finished = streaming_format_tests(idx, chunk)
            has_finish_reason = finished
            if finished:
                break
            complete_response += chunk
        if has_finish_reason is False:
            raise Exception("finish reason not set for last chunk")
        if complete_response.strip() == "":
            raise Exception("Empty response received")
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


# test_completion_sagemaker_stream()
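streaming_format_tests is a helper defined elsewhere in this test file; a plain consumer that just wants the generated text can iterate the stream directly, assuming litellm's usual OpenAI-style delta chunks (a sketch, not part of this diff):

import litellm

response = litellm.completion(
    model="watsonx/ibm/granite-13b-chat-v2",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    max_tokens=20,
    stream=True,
)
text = ""
for chunk in response:
    # Each chunk carries an incremental delta; content can be None on the final chunk.
    delta = chunk.choices[0].delta
    if delta.content:
        text += delta.content
print(text)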